diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f990d515908..a6f0b283ea2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,7 +14,7 @@ concurrency: env: # renovate: datasource=crate depName=diesel_cli versioning=semver - DIESEL_CLI_VERSION: 1.4.1 + DIESEL_CLI_VERSION: 2.0.1 # renovate: datasource=npm depName=pnpm PNPM_VERSION: 7.27.0 # renovate: datasource=github-releases depName=rust lookupName=rust-lang/rust diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 751c13aa7eb..738e466160b 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -10,7 +10,7 @@ env: # renovate: datasource=crate depName=cargo-tarpaulin versioning=semver CARGO_TARPAULIN_VERSION: 0.25.0 # renovate: datasource=crate depName=diesel_cli versioning=semver - DIESEL_CLI_VERSION: 1.4.1 + DIESEL_CLI_VERSION: 2.0.1 # renovate: datasource=github-releases depName=rust lookupName=rust-lang/rust RUST_VERSION: 1.67.1 diff --git a/Cargo.lock b/Cargo.lock index 4d47493b7e3..65d80471d68 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -447,7 +447,7 @@ dependencies = [ "threadpool", "tikv-jemallocator", "tokio", - "toml", + "toml 0.7.2", "tower", "tower-http", "tower-service", @@ -846,14 +846,15 @@ dependencies = [ [[package]] name = "diesel" -version = "1.4.8" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b28135ecf6b7d446b43e27e225622a038cc4e2930a1022f51cdb97ada19b8e4d" +checksum = "4391a22b19c916e50bec4d6140f29bdda3e3bb187223fe6e3ea0b6e4d1021c04" dependencies = [ "bitflags", "byteorder", "chrono", "diesel_derives", + "itoa", "pq-sys", "r2d2", "serde_json", @@ -861,10 +862,11 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "1.4.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45f5098f628d02a7a0f68ddba586fb61e80edec3bdc1be3b921f4ceec60858d3" +checksum = "143b758c91dbc3fe1fdcb0dba5bd13276c6a66422f2ef5795b58488248a310aa" dependencies = [ + "proc-macro-error", "proc-macro2", "quote", "syn", @@ -872,19 +874,20 @@ dependencies = [ [[package]] name = "diesel_full_text_search" -version = "1.0.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ad3168d9d2008c58b8c9fabb79ddc38d1f9d511fa15e0dcbd6b987912b05783" +checksum = "f2ec394eff8a81dd1bbb1fc731581d07c46c3cba1c2c51246f5dabe03760e673" dependencies = [ "diesel", ] [[package]] name = "diesel_migrations" -version = "1.4.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf3cde8413353dc7f5d72fa8ce0b99a560a359d2c5ef1e5817ca731cd9008f4c" +checksum = "e9ae22beef5e9d6fab9225ddb073c1c6c1a7a6ded5019d5da11d1e5c5adc34e2" dependencies = [ + "diesel", "migrations_internals", "migrations_macros", ] @@ -1746,23 +1749,23 @@ dependencies = [ [[package]] name = "migrations_internals" -version = "1.4.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b4fc84e4af020b837029e017966f86a1c2d5e83e64b589963d5047525995860" +checksum = "c493c09323068c01e54c685f7da41a9ccf9219735c3766fbfd6099806ea08fbc" dependencies = [ - "diesel", + "serde", + "toml 0.5.11", ] [[package]] name = "migrations_macros" -version = "1.4.2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9753f12909fd8d923f75ae5c3258cae1ed3c8ec052e1b38c93c21a6d157f789c" +checksum = "8a8ff27a350511de30cdabb77147501c36ef02e0451d957abea2f30caffb2b58" 
dependencies = [ "migrations_internals", "proc-macro2", "quote", - "syn", ] [[package]] @@ -3219,6 +3222,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "toml" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" +dependencies = [ + "serde", +] + [[package]] name = "toml" version = "0.7.2" diff --git a/Cargo.toml b/Cargo.toml index b359b072f1d..8d5499c462b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,9 +36,9 @@ cookie = { version = "=0.17.0", features = ["secure"] } dashmap = { version = "=5.4.0", features = ["raw-api"] } derive_deref = "=1.1.1" dialoguer = "=0.10.3" -diesel = { version = "=1.4.8", features = ["postgres", "serde_json", "chrono", "r2d2"] } -diesel_full_text_search = "=1.0.1" -diesel_migrations = { version = "=1.4.0", features = ["postgres"] } +diesel = { version = "2", features = ["postgres", "serde_json", "chrono", "r2d2"] } +diesel_full_text_search = "2" +diesel_migrations = { version = "2", features = ["postgres"] } dotenv = "=0.15.0" flate2 = "=1.0.25" futures-channel = { version = "=0.3.26", default-features = false } @@ -90,6 +90,6 @@ tokio = "=1.25.0" tower-service = "=0.3.2" [build-dependencies] -diesel = { version = "=1.4.8", features = ["postgres"] } -diesel_migrations = { version = "=1.4.0", features = ["postgres"] } +diesel = { version = "2", features = ["postgres"] } +diesel_migrations = { version = "2", features = ["postgres"] } dotenv = "=0.15.0" diff --git a/backend.Dockerfile b/backend.Dockerfile index 83e62930da1..a92287a8dc1 100644 --- a/backend.Dockerfile +++ b/backend.Dockerfile @@ -4,7 +4,7 @@ ARG RUST_VERSION=1.67.1 FROM rust:$RUST_VERSION # renovate: datasource=crate depName=diesel_cli versioning=semver -ARG DIESEL_CLI_VERSION=1.4.1 +ARG DIESEL_CLI_VERSION=2.0.1 RUN apt-get update \ && apt-get install -y postgresql \ diff --git a/build.rs b/build.rs index 76f4cafb05e..7cf3f4caf1b 100644 --- a/build.rs +++ b/build.rs @@ -1,5 +1,5 @@ use diesel::prelude::*; -use diesel_migrations::run_pending_migrations; +use diesel_migrations::{FileBasedMigrations, MigrationHarness}; use std::env; fn main() { @@ -8,9 +8,13 @@ fn main() { println!("cargo:rerun-if-changed=migrations/"); if env::var("PROFILE") == Ok("debug".into()) { if let Ok(database_url) = dotenv::var("TEST_DATABASE_URL") { - let connection = PgConnection::establish(&database_url) + let connection = &mut PgConnection::establish(&database_url) .expect("Could not connect to TEST_DATABASE_URL"); - run_pending_migrations(&connection).expect("Error running migrations"); + let migrations = FileBasedMigrations::find_migrations_directory() + .expect("Could not find migrations"); + connection + .run_pending_migrations(migrations) + .expect("Error running migrations"); } } } diff --git a/diesel.toml b/diesel.toml index cb932d025a6..52fd076e430 100644 --- a/diesel.toml +++ b/diesel.toml @@ -3,5 +3,4 @@ [print_schema] file = "src/schema.rs" with_docs = true -import_types = ["diesel::sql_types::*", "diesel_full_text_search::{TsVector as Tsvector}"] patch_file = "src/schema.patch" diff --git a/src/admin/delete_crate.rs b/src/admin/delete_crate.rs index c9ba06ec424..10571ae6c63 100644 --- a/src/admin/delete_crate.rs +++ b/src/admin/delete_crate.rs @@ -19,15 +19,15 @@ pub struct Opts { } pub fn run(opts: Opts) { - let conn = db::oneoff_connection().unwrap(); - conn.transaction::<_, diesel::result::Error, _>(|| { - delete(opts, &conn); + let conn = &mut db::oneoff_connection().unwrap(); + 
conn.transaction::<_, diesel::result::Error, _>(|conn| { + delete(opts, conn); Ok(()) }) .unwrap() } -fn delete(opts: Opts, conn: &PgConnection) { +fn delete(opts: Opts, conn: &mut PgConnection) { let krate: Crate = Crate::by_name(&opts.crate_name).first(conn).unwrap(); let config = config::Base::from_environment(); diff --git a/src/admin/delete_version.rs b/src/admin/delete_version.rs index 867421730e6..8c4f5f02510 100644 --- a/src/admin/delete_version.rs +++ b/src/admin/delete_version.rs @@ -24,15 +24,15 @@ pub struct Opts { } pub fn run(opts: Opts) { - let conn = db::oneoff_connection().unwrap(); - conn.transaction::<_, diesel::result::Error, _>(|| { - delete(opts, &conn); + let conn = &mut db::oneoff_connection().unwrap(); + conn.transaction::<_, diesel::result::Error, _>(|conn| { + delete(opts, conn); Ok(()) }) .unwrap() } -fn delete(opts: Opts, conn: &PgConnection) { +fn delete(opts: Opts, conn: &mut PgConnection) { let krate: Crate = Crate::by_name(&opts.crate_name).first(conn).unwrap(); let v: Version = Version::belonging_to(&krate) .filter(versions::num.eq(&opts.version)) diff --git a/src/admin/enqueue_job.rs b/src/admin/enqueue_job.rs index a9e47fe19e4..a0a4275284f 100644 --- a/src/admin/enqueue_job.rs +++ b/src/admin/enqueue_job.rs @@ -26,7 +26,7 @@ pub enum Command { } pub fn run(command: Command) -> Result<()> { - let conn = db::oneoff_connection()?; + let conn = &mut db::oneoff_connection()?; println!("Enqueueing background job: {command:?}"); match command { @@ -34,22 +34,22 @@ pub fn run(command: Command) -> Result<()> { let count: i64 = background_jobs .filter(job_type.eq("update_downloads")) .count() - .get_result(&conn) + .get_result(conn) .unwrap(); if count > 0 { println!("Did not enqueue update_downloads, existing job already in progress"); Ok(()) } else { - Ok(worker::update_downloads().enqueue(&conn)?) + Ok(worker::update_downloads().enqueue(conn)?) 
} } Command::DumpDb { database_url, target_name, - } => Ok(worker::dump_db(database_url, target_name).enqueue(&conn)?), - Command::DailyDbMaintenance => Ok(worker::daily_db_maintenance().enqueue(&conn)?), - Command::SquashIndex => Ok(worker::squash_index().enqueue(&conn)?), - Command::NormalizeIndex { dry_run } => Ok(worker::normalize_index(dry_run).enqueue(&conn)?), + } => Ok(worker::dump_db(database_url, target_name).enqueue(conn)?), + Command::DailyDbMaintenance => Ok(worker::daily_db_maintenance().enqueue(conn)?), + Command::SquashIndex => Ok(worker::squash_index().enqueue(conn)?), + Command::NormalizeIndex { dry_run } => Ok(worker::normalize_index(dry_run).enqueue(conn)?), } } diff --git a/src/admin/git_import.rs b/src/admin/git_import.rs index bc32e43b818..ed3d35c7278 100644 --- a/src/admin/git_import.rs +++ b/src/admin/git_import.rs @@ -28,7 +28,7 @@ pub struct Opts { } pub fn run(opts: Opts) -> anyhow::Result<()> { - let conn = db::oneoff_connection().unwrap(); + let mut conn = db::oneoff_connection().unwrap(); println!("fetching git repo"); let config = RepositoryConfig::from_environment(); let repo = Repository::open(&config)?; @@ -53,10 +53,10 @@ pub fn run(opts: Opts) -> anyhow::Result<()> { } let file = File::open(path)?; let reader = BufReader::new(file); - let result = conn.transaction(|| -> anyhow::Result<()> { + let result = conn.transaction(|conn| -> anyhow::Result<()> { for line in reader.lines() { let krate: cargo_registry_index::Crate = serde_json::from_str(&line?)?; - import_data(&conn, &krate).with_context(|| { + import_data(conn, &krate).with_context(|| { format!("Failed to update crate {}#{}", krate.name, krate.vers) })? } @@ -71,7 +71,7 @@ pub fn run(opts: Opts) -> anyhow::Result<()> { Ok(()) } -fn import_data(conn: &PgConnection, krate: &cargo_registry_index::Crate) -> anyhow::Result<()> { +fn import_data(conn: &mut PgConnection, krate: &cargo_registry_index::Crate) -> anyhow::Result<()> { let version_id: i32 = versions::table .inner_join(crates::table) .filter(crates::name.eq(&krate.name)) diff --git a/src/admin/migrate.rs b/src/admin/migrate.rs index 89bf334c136..0ae9dc3f366 100644 --- a/src/admin/migrate.rs +++ b/src/admin/migrate.rs @@ -1,7 +1,11 @@ use anyhow::Error; +use diesel_migrations::{ + embed_migrations, EmbeddedMigrations, HarnessWithOutput, MigrationHarness, +}; static CATEGORIES_TOML: &str = include_str!("../boot/categories.toml"); -diesel_migrations::embed_migrations!("./migrations"); + +pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations"); #[derive(clap::Parser, Debug, Copy, Clone)] #[command( @@ -32,13 +36,17 @@ pub fn run(_opts: Opts) -> Result<(), Error> { } // The primary is online, access directly via `DATABASE_URL`. 
- let conn = crate::db::oneoff_connection_with_config(&config)?; + let conn = &mut crate::db::oneoff_connection_with_config(&config)?; info!("Migrating the database"); - embedded_migrations::run_with_output(&conn, &mut std::io::stdout())?; + let mut stdout = std::io::stdout(); + let mut harness = HarnessWithOutput::new(conn, &mut stdout); + harness + .run_pending_migrations(MIGRATIONS) + .expect("failed to run migrations"); info!("Synchronizing crate categories"); - crate::boot::categories::sync_with_connection(CATEGORIES_TOML, &conn).unwrap(); + crate::boot::categories::sync_with_connection(CATEGORIES_TOML, conn).unwrap(); Ok(()) } diff --git a/src/admin/populate.rs b/src/admin/populate.rs index c0a93149082..b3b64cc6ef8 100644 --- a/src/admin/populate.rs +++ b/src/admin/populate.rs @@ -14,11 +14,11 @@ pub struct Opts { } pub fn run(opts: Opts) { - let conn = db::oneoff_connection().unwrap(); - conn.transaction(|| update(opts, &conn)).unwrap(); + let mut conn = db::oneoff_connection().unwrap(); + conn.transaction(|conn| update(opts, conn)).unwrap(); } -fn update(opts: Opts, conn: &PgConnection) -> QueryResult<()> { +fn update(opts: Opts, conn: &mut PgConnection) -> QueryResult<()> { use diesel::dsl::*; for id in opts.version_ids { diff --git a/src/admin/render_readmes.rs b/src/admin/render_readmes.rs index 56a7fd81760..c5e02039ee4 100644 --- a/src/admin/render_readmes.rs +++ b/src/admin/render_readmes.rs @@ -9,7 +9,7 @@ use std::{io::Read, path::Path, sync::Arc, thread}; use cargo_registry_markdown::text_to_html; use chrono::{TimeZone, Utc}; -use diesel::{dsl::any, prelude::*}; +use diesel::prelude::*; use flate2::read::GzDecoder; use reqwest::{blocking::Client, header}; use tar::{self, Archive}; @@ -39,7 +39,7 @@ pub struct Opts { pub fn run(opts: Opts) -> anyhow::Result<()> { let base_config = Arc::new(config::Base::from_environment()); - let conn = db::oneoff_connection().unwrap(); + let conn = &mut db::oneoff_connection().unwrap(); let start_time = Utc::now(); @@ -70,7 +70,7 @@ pub fn run(opts: Opts) -> anyhow::Result<()> { query = query.filter(crates::name.eq(crate_name)); } - let version_ids: Vec = query.load(&conn).expect("error loading version ids"); + let version_ids: Vec = query.load(conn).expect("error loading version ids"); let total_versions = version_ids.len(); println!("Rendering {total_versions} versions"); @@ -95,14 +95,14 @@ pub fn run(opts: Opts) -> anyhow::Result<()> { let versions: Vec<(Version, String)> = versions::table .inner_join(crates::table) - .filter(versions::id.eq(any(version_ids_chunk))) + .filter(versions::id.eq_any(version_ids_chunk)) .select((versions::all_columns, crates::name)) - .load(&conn) + .load(conn) .expect("error loading versions"); let mut tasks = Vec::with_capacity(page_size); for (version, krate_name) in versions { - Version::record_readme_rendering(version.id, &conn) + Version::record_readme_rendering(version.id, conn) .context("Couldn't record rendering time")?; let client = client.clone(); diff --git a/src/admin/transfer_crates.rs b/src/admin/transfer_crates.rs index 9b61a1b157a..2bfb21b8b77 100644 --- a/src/admin/transfer_crates.rs +++ b/src/admin/transfer_crates.rs @@ -21,15 +21,15 @@ pub struct Opts { } pub fn run(opts: Opts) { - let conn = db::oneoff_connection().unwrap(); - conn.transaction::<_, diesel::result::Error, _>(|| { - transfer(opts, &conn); + let conn = &mut db::oneoff_connection().unwrap(); + conn.transaction::<_, diesel::result::Error, _>(|conn| { + transfer(opts, conn); Ok(()) }) .unwrap() } -fn transfer(opts: Opts, conn: 
&PgConnection) { +fn transfer(opts: Opts, conn: &mut PgConnection) { let from: User = users::table .filter(users::gh_login.eq(opts.from_user)) .first(conn) diff --git a/src/admin/verify_token.rs b/src/admin/verify_token.rs index 2f3d0cf033f..1c0d6cb24e4 100644 --- a/src/admin/verify_token.rs +++ b/src/admin/verify_token.rs @@ -13,8 +13,8 @@ pub struct Opts { } pub fn run(opts: Opts) -> AppResult<()> { - let conn = db::oneoff_connection()?; - let user = User::find_by_api_token(&conn, &opts.api_token)?; + let conn = &mut db::oneoff_connection()?; + let user = User::find_by_api_token(conn, &opts.api_token)?; println!("The token belongs to user {}", user.gh_login); Ok(()) } diff --git a/src/admin/yank_version.rs b/src/admin/yank_version.rs index 7bfb8d54ae3..7ec3c0115f1 100644 --- a/src/admin/yank_version.rs +++ b/src/admin/yank_version.rs @@ -23,15 +23,15 @@ pub struct Opts { } pub fn run(opts: Opts) { - let conn = db::oneoff_connection().unwrap(); - conn.transaction::<_, diesel::result::Error, _>(|| { - yank(opts, &conn); + let mut conn = db::oneoff_connection().unwrap(); + conn.transaction::<_, diesel::result::Error, _>(|conn| { + yank(opts, conn); Ok(()) }) .unwrap() } -fn yank(opts: Opts, conn: &PgConnection) { +fn yank(opts: Opts, conn: &mut PgConnection) { let Opts { crate_name, version, diff --git a/src/auth.rs b/src/auth.rs index 3e66bcaa93f..0365a79c387 100644 --- a/src/auth.rs +++ b/src/auth.rs @@ -58,7 +58,7 @@ impl AuthCheck { pub fn check( &self, request: &T, - conn: &PgConnection, + conn: &mut PgConnection, ) -> AppResult { let auth = authenticate(request, conn)?; @@ -158,7 +158,7 @@ impl Authentication { fn authenticate_via_cookie( req: &T, - conn: &PgConnection, + conn: &mut PgConnection, ) -> AppResult> { let user_id_from_session = req .session() @@ -179,7 +179,7 @@ fn authenticate_via_cookie( fn authenticate_via_token( req: &T, - conn: &PgConnection, + conn: &mut PgConnection, ) -> AppResult> { let maybe_authorization = req .headers() @@ -207,7 +207,7 @@ fn authenticate_via_token( Ok(Some(TokenAuthentication { user, token })) } -fn authenticate(req: &T, conn: &PgConnection) -> AppResult { +fn authenticate(req: &T, conn: &mut PgConnection) -> AppResult { controllers::util::verify_origin(req)?; match authenticate_via_cookie(req, conn) { diff --git a/src/background_jobs.rs b/src/background_jobs.rs index 5611a5e906d..3e09ed14a10 100644 --- a/src/background_jobs.rs +++ b/src/background_jobs.rs @@ -30,7 +30,7 @@ pub(crate) struct PerformState<'a> { /// /// Most jobs can reuse the existing connection, however it will already be within a /// transaction and is thus not appropriate in all cases. - pub(crate) conn: &'a PgConnection, + pub(crate) conn: &'a mut PgConnection, /// A connection pool for obtaining a unique connection. /// /// This will be `None` within our standard test framework, as there everything is expected to @@ -77,7 +77,7 @@ impl Job { } } - pub fn enqueue(&self, conn: &PgConnection) -> Result<(), EnqueueError> { + pub fn enqueue(&self, conn: &mut PgConnection) -> Result<(), EnqueueError> { use crate::schema::background_jobs::dsl::*; let job_data = self.to_value()?; @@ -117,7 +117,7 @@ impl Job { .expect("Application should configure a background runner environment"); match self { Job::DailyDbMaintenance => { - worker::perform_daily_db_maintenance(&*fresh_connection(pool)?) + worker::perform_daily_db_maintenance(&mut *fresh_connection(pool)?) 
} Job::DumpDb(args) => worker::perform_dump_db(env, args.database_url, args.target_name), Job::IndexAddCrate(args) => worker::perform_index_add_crate(env, conn, &args.krate), @@ -136,7 +136,7 @@ impl Job { args.base_url.as_deref(), args.pkg_path_in_vcs.as_deref(), ), - Job::UpdateDownloads => worker::perform_update_downloads(&*fresh_connection(pool)?), + Job::UpdateDownloads => worker::perform_update_downloads(&mut *fresh_connection(pool)?), } } } diff --git a/src/bin/monitor.rs b/src/bin/monitor.rs index 68136d7098d..8c4494dc776 100644 --- a/src/bin/monitor.rs +++ b/src/bin/monitor.rs @@ -11,11 +11,11 @@ use cargo_registry::{admin::on_call, db, schema::*}; use diesel::prelude::*; fn main() -> Result<()> { - let conn = db::oneoff_connection()?; + let conn = &mut db::oneoff_connection()?; - check_failing_background_jobs(&conn)?; - check_stalled_update_downloads(&conn)?; - check_spam_attack(&conn)?; + check_failing_background_jobs(conn)?; + check_stalled_update_downloads(conn)?; + check_spam_attack(conn)?; Ok(()) } @@ -27,7 +27,7 @@ fn main() -> Result<()> { /// /// Within the default 15 minute time, a job should have already had several /// failed retry attempts. -fn check_failing_background_jobs(conn: &PgConnection) -> Result<()> { +fn check_failing_background_jobs(conn: &mut PgConnection) -> Result<()> { use cargo_registry::schema::background_jobs::dsl::*; use diesel::dsl::*; use diesel::sql_types::Integer; @@ -69,7 +69,7 @@ fn check_failing_background_jobs(conn: &PgConnection) -> Result<()> { } /// Check for an `update_downloads` job that has run longer than expected -fn check_stalled_update_downloads(conn: &PgConnection) -> Result<()> { +fn check_stalled_update_downloads(conn: &mut PgConnection) -> Result<()> { use cargo_registry::schema::background_jobs::dsl::*; use chrono::{DateTime, NaiveDateTime, Utc}; @@ -106,9 +106,8 @@ fn check_stalled_update_downloads(conn: &PgConnection) -> Result<()> { } /// Check for known spam patterns -fn check_spam_attack(conn: &PgConnection) -> Result<()> { +fn check_spam_attack(conn: &mut PgConnection) -> Result<()> { use cargo_registry::sql::canon_crate_name; - use diesel::dsl::*; const EVENT_KEY: &str = "spam_attack"; @@ -123,7 +122,7 @@ fn check_spam_attack(conn: &PgConnection) -> Result<()> { let mut event_description = None; let bad_crate: Option = crates::table - .filter(canon_crate_name(crates::name).eq(any(bad_crate_names))) + .filter(canon_crate_name(crates::name).eq_any(bad_crate_names)) .select(crates::name) .first(conn) .optional()?; diff --git a/src/boot/categories.rs b/src/boot/categories.rs index 9d00cd9370b..849a04d67f8 100644 --- a/src/boot/categories.rs +++ b/src/boot/categories.rs @@ -75,9 +75,8 @@ fn categories_from_toml( Ok(result) } -pub fn sync_with_connection(toml_str: &str, conn: &PgConnection) -> Result<()> { +pub fn sync_with_connection(toml_str: &str, conn: &mut PgConnection) -> Result<()> { use crate::schema::categories::dsl::*; - use diesel::dsl::all; use diesel::pg::upsert::excluded; let toml: toml::value::Table = @@ -95,7 +94,7 @@ pub fn sync_with_connection(toml_str: &str, conn: &PgConnection) -> Result<()> { }) .collect::>(); - conn.transaction(|| { + conn.transaction(|conn| { let slugs: Vec = diesel::insert_into(categories) .values(&to_insert) .on_conflict(slug) @@ -108,7 +107,7 @@ pub fn sync_with_connection(toml_str: &str, conn: &PgConnection) -> Result<()> { .get_results(conn)?; diesel::delete(categories) - .filter(slug.ne(all(slugs))) + .filter(slug.ne_all(slugs)) .execute(conn)?; Ok(()) }) diff --git 
a/src/controllers/category.rs b/src/controllers/category.rs index ada066845f4..34532168b8e 100644 --- a/src/controllers/category.rs +++ b/src/controllers/category.rs @@ -16,16 +16,15 @@ pub async fn index(app: AppState, req: Parts) -> AppResult> { let offset = options.offset().unwrap_or_default(); let sort = query.get("sort").map_or("alpha", String::as_str); - let conn = app.db_read()?; - let categories = - Category::toplevel(&conn, sort, i64::from(options.per_page), i64::from(offset))?; + let conn = &mut app.db_read()?; + let categories = Category::toplevel(conn, sort, options.per_page, offset)?; let categories = categories .into_iter() .map(Category::into) .collect::>(); // Query for the total count of categories - let total = Category::count_toplevel(&conn)?; + let total = Category::count_toplevel(conn)?; Ok(Json(json!({ "categories": categories, @@ -38,15 +37,15 @@ pub async fn index(app: AppState, req: Parts) -> AppResult> { /// Handles the `GET /categories/:category_id` route. pub async fn show(state: AppState, Path(slug): Path) -> AppResult> { conduit_compat(move || { - let conn = state.db_read()?; - let cat: Category = Category::by_slug(&slug).first(&*conn)?; + let conn = &mut *state.db_read()?; + let cat: Category = Category::by_slug(&slug).first(conn)?; let subcats = cat - .subcategories(&conn)? + .subcategories(conn)? .into_iter() .map(Category::into) .collect(); let parents = cat - .parent_categories(&conn)? + .parent_categories(conn)? .into_iter() .map(Category::into) .collect(); @@ -71,11 +70,11 @@ pub async fn show(state: AppState, Path(slug): Path) -> AppResult AppResult> { conduit_compat(move || { - let conn = state.db_read()?; + let conn = &mut *state.db_read()?; let slugs: Vec = categories::table .select((categories::slug, categories::slug, categories::description)) .order(categories::slug) - .load(&*conn)?; + .load(conn)?; #[derive(Serialize, Queryable)] struct Slug { diff --git a/src/controllers/crate_owner_invitation.rs b/src/controllers/crate_owner_invitation.rs index 65542aa2cd9..3c6a1b6f859 100644 --- a/src/controllers/crate_owner_invitation.rs +++ b/src/controllers/crate_owner_invitation.rs @@ -18,13 +18,13 @@ use std::collections::{HashMap, HashSet}; /// Handles the `GET /api/v1/me/crate_owner_invitations` route. pub async fn list(app: AppState, req: Parts) -> AppResult> { conduit_compat(move || { - let conn = app.db_read()?; - let auth = AuthCheck::only_cookie().check(&req, &conn)?; + let conn = &mut app.db_read()?; + let auth = AuthCheck::only_cookie().check(&req, conn)?; let user_id = auth.user_id(); let PrivateListResponse { invitations, users, .. - } = prepare_list(&app, &req, auth, ListFilter::InviteeId(user_id), &conn)?; + } = prepare_list(&app, &req, auth, ListFilter::InviteeId(user_id), conn)?; // The schema for the private endpoints is converted to the schema used by v1 endpoints. let crate_owner_invitations = invitations @@ -58,8 +58,8 @@ pub async fn list(app: AppState, req: Parts) -> AppResult> { /// Handles the `GET /api/private/crate_owner_invitations` route. 
pub async fn private_list(app: AppState, req: Parts) -> AppResult> { conduit_compat(move || { - let conn = app.db_read()?; - let auth = AuthCheck::only_cookie().check(&req, &conn)?; + let conn = &mut app.db_read()?; + let auth = AuthCheck::only_cookie().check(&req, conn)?; let filter = if let Some(crate_name) = req.query().get("crate_name") { ListFilter::CrateName(crate_name.clone()) @@ -69,7 +69,7 @@ pub async fn private_list(app: AppState, req: Parts) -> AppResult AppResult { let pagination: PaginationOptions = PaginationOptions::builder() .enable_pages(false) @@ -133,7 +133,7 @@ fn prepare_list( crate_owner_invitations::invited_user_id, )) // We fetch one element over the page limit to then detect whether there is a next page. - .limit(pagination.per_page as i64 + 1); + .limit(pagination.per_page + 1); // Load and paginate the results. let mut raw_invitations: Vec = match pagination.page { @@ -265,18 +265,18 @@ pub async fn handle_invite(state: AppState, req: BytesRequest) -> AppResult AppResult> { conduit_compat(move || { let config = &state.config; - let conn = state.db_write()?; + let conn = &mut state.db_write()?; - let invitation = CrateOwnerInvitation::find_by_token(&token, &conn)?; + let invitation = CrateOwnerInvitation::find_by_token(&token, conn)?; let crate_id = invitation.crate_id; - invitation.accept(&conn, config)?; + invitation.accept(conn, config)?; Ok(Json(json!({ "crate_owner_invitation": { diff --git a/src/controllers/github/secret_scanning.rs b/src/controllers/github/secret_scanning.rs index 571cfc4b726..d73f6704d24 100644 --- a/src/controllers/github/secret_scanning.rs +++ b/src/controllers/github/secret_scanning.rs @@ -155,14 +155,14 @@ fn alert_revoke_token( state: &AppState, alert: &GitHubSecretAlert, ) -> Result { - let conn = state.db_write()?; + let conn = &mut *state.db_write()?; let hashed_token = SecureToken::hash(&alert.token); // Not using `ApiToken::find_by_api_token()` in order to preserve `last_used_at` let token = api_tokens::table .filter(api_tokens::token.eq(hashed_token)) - .get_result::(&*conn) + .get_result::(conn) .optional()?; let Some(token) = token else { @@ -180,14 +180,14 @@ fn alert_revoke_token( diesel::update(&token) .set(api_tokens::revoked.eq(true)) - .execute(&*conn)?; + .execute(conn)?; warn!( token_id = %token.id, user_id = %token.user_id, "Active API token received and revoked (true positive)", ); - if let Err(error) = send_notification_email(&token, alert, state, &conn) { + if let Err(error) = send_notification_email(&token, alert, state, conn) { warn!( token_id = %token.id, user_id = %token.user_id, ?error, "Failed to send email notification", @@ -201,7 +201,7 @@ fn send_notification_email( token: &ApiToken, alert: &GitHubSecretAlert, state: &AppState, - conn: &PgConnection, + conn: &mut PgConnection, ) -> anyhow::Result<()> { let user = User::find(conn, token.user_id).context("Failed to find user")?; let Some(email) = user.email(conn)? 
else { diff --git a/src/controllers/helpers/pagination.rs b/src/controllers/helpers/pagination.rs index f4f1716cc0b..bdaba011dda 100644 --- a/src/controllers/helpers/pagination.rs +++ b/src/controllers/helpers/pagination.rs @@ -17,8 +17,8 @@ use std::net::IpAddr; use std::str::FromStr; const MAX_PAGE_BEFORE_SUSPECTED_BOT: u32 = 10; -const DEFAULT_PER_PAGE: u32 = 10; -const MAX_PER_PAGE: u32 = 100; +const DEFAULT_PER_PAGE: i64 = 10; +const MAX_PER_PAGE: i64 = 100; #[derive(Debug, Clone, PartialEq, Eq)] pub(crate) enum Page { @@ -30,7 +30,7 @@ pub(crate) enum Page { #[derive(Debug, Clone)] pub(crate) struct PaginationOptions { pub(crate) page: Page, - pub(crate) per_page: u32, + pub(crate) per_page: i64, } impl PaginationOptions { @@ -42,9 +42,9 @@ impl PaginationOptions { } } - pub(crate) fn offset(&self) -> Option { + pub(crate) fn offset(&self) -> Option { if let Page::Numeric(p) = self.page { - Some((p - 1) * self.per_page) + Some((p - 1) as i64 * self.per_page) } else { None } @@ -204,12 +204,12 @@ pub(crate) struct PaginatedQuery { } impl PaginatedQuery { - pub(crate) fn load(self, conn: &PgConnection) -> QueryResult> + pub(crate) fn load<'a, U>(self, conn: &mut PgConnection) -> QueryResult> where - Self: LoadQuery>, + Self: LoadQuery<'a, PgConnection, WithCount>, { let options = self.options.clone(); - let records_and_total = self.internal_load(conn)?; + let records_and_total = self.internal_load(conn)?.collect::>()?; Ok(Paginated { records_and_total, options, @@ -232,14 +232,13 @@ impl QueryFragment for PaginatedQuery where T: QueryFragment, { - fn walk_ast(&self, mut out: AstPass<'_, Pg>) -> QueryResult<()> { + fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> { out.push_sql("SELECT *, COUNT(*) OVER () FROM ("); self.query.walk_ast(out.reborrow())?; out.push_sql(") t LIMIT "); - out.push_bind_param::(&i64::from(self.options.per_page))?; + out.push_bind_param::(&self.options.per_page)?; if let Some(offset) = self.options.offset() { - out.push_sql(" OFFSET "); - out.push_bind_param::(&i64::from(offset))?; + out.push_sql(format!(" OFFSET {offset}").as_str()); } Ok(()) } diff --git a/src/controllers/keyword.rs b/src/controllers/keyword.rs index ac4d1d78b23..66836049602 100644 --- a/src/controllers/keyword.rs +++ b/src/controllers/keyword.rs @@ -26,8 +26,8 @@ pub async fn index(state: AppState, qp: Query, req: Parts) -> AppRes }; let query = query.pages_pagination(PaginationOptions::builder().gather(&req)?); - let conn = state.db_read()?; - let data: Paginated = query.load(&conn)?; + let conn = &mut state.db_read()?; + let data: Paginated = query.load(conn)?; let total = data.total(); let kws = data .into_iter() @@ -45,9 +45,9 @@ pub async fn index(state: AppState, qp: Query, req: Parts) -> AppRes /// Handles the `GET /keywords/:keyword_id` route. 
pub async fn show(Path(name): Path, state: AppState) -> AppResult> { conduit_compat(move || { - let conn = state.db_read()?; + let conn = &mut state.db_read()?; - let kw = Keyword::find_by_keyword(&conn, &name)?; + let kw = Keyword::find_by_keyword(conn, &name)?; Ok(Json(json!({ "keyword": EncodableKeyword::from(kw) }))) }) diff --git a/src/controllers/krate/downloads.rs b/src/controllers/krate/downloads.rs index 48d4a707df2..9b22ed0ae6b 100644 --- a/src/controllers/krate/downloads.rs +++ b/src/controllers/krate/downloads.rs @@ -18,10 +18,10 @@ pub async fn downloads(state: AppState, Path(crate_name): Path) -> AppRe use diesel::dsl::*; use diesel::sql_types::BigInt; - let conn = state.db_read()?; - let krate: Crate = Crate::by_name(&crate_name).first(&*conn)?; + let conn = &mut *state.db_read()?; + let krate: Crate = Crate::by_name(&crate_name).first(conn)?; - let mut versions: Vec = krate.all_versions().load(&*conn)?; + let mut versions: Vec = krate.all_versions().load(conn)?; versions .sort_by_cached_key(|version| cmp::Reverse(semver::Version::parse(&version.num).ok())); let (latest_five, rest) = versions.split_at(cmp::min(5, versions.len())); @@ -29,7 +29,7 @@ pub async fn downloads(state: AppState, Path(crate_name): Path) -> AppRe let downloads = VersionDownload::belonging_to(latest_five) .filter(version_downloads::date.gt(date(now - 90.days()))) .order(version_downloads::date.asc()) - .load(&*conn)? + .load(conn)? .into_iter() .map(VersionDownload::into) .collect::>(); @@ -43,7 +43,7 @@ pub async fn downloads(state: AppState, Path(crate_name): Path) -> AppRe .filter(version_downloads::date.gt(date(now - 90.days()))) .group_by(version_downloads::date) .order(version_downloads::date.asc()) - .load(&*conn)?; + .load(conn)?; #[derive(Serialize, Queryable)] struct ExtraDownload { diff --git a/src/controllers/krate/follow.rs b/src/controllers/krate/follow.rs index 19fdb2dd605..e25822f8df4 100644 --- a/src/controllers/krate/follow.rs +++ b/src/controllers/krate/follow.rs @@ -4,14 +4,11 @@ use crate::auth::AuthCheck; use diesel::associations::Identifiable; use crate::controllers::frontend_prelude::*; -use crate::db::DieselPooledConn; use crate::models::{Crate, Follow}; use crate::schema::*; -fn follow_target(crate_name: &str, conn: &DieselPooledConn<'_>, user_id: i32) -> AppResult { - let crate_id = Crate::by_name(crate_name) - .select(crates::id) - .first(&**conn)?; +fn follow_target(crate_name: &str, conn: &mut PgConnection, user_id: i32) -> AppResult { + let crate_id = Crate::by_name(crate_name).select(crates::id).first(conn)?; Ok(Follow { user_id, crate_id }) } @@ -22,13 +19,13 @@ pub async fn follow( req: Parts, ) -> AppResult { conduit_compat(move || { - let conn = app.db_write()?; - let user_id = AuthCheck::default().check(&req, &conn)?.user_id(); - let follow = follow_target(&crate_name, &conn, user_id)?; + let conn = &mut *app.db_write()?; + let user_id = AuthCheck::default().check(&req, conn)?.user_id(); + let follow = follow_target(&crate_name, conn, user_id)?; diesel::insert_into(follows::table) .values(&follow) .on_conflict_do_nothing() - .execute(&*conn)?; + .execute(conn)?; ok_true() }) @@ -42,10 +39,10 @@ pub async fn unfollow( req: Parts, ) -> AppResult { conduit_compat(move || { - let conn = app.db_write()?; - let user_id = AuthCheck::default().check(&req, &conn)?.user_id(); - let follow = follow_target(&crate_name, &conn, user_id)?; - diesel::delete(&follow).execute(&*conn)?; + let conn = &mut *app.db_write()?; + let user_id = AuthCheck::default().check(&req, 
conn)?.user_id(); + let follow = follow_target(&crate_name, conn, user_id)?; + diesel::delete(&follow).execute(conn)?; ok_true() }) @@ -61,11 +58,11 @@ pub async fn following( conduit_compat(move || { use diesel::dsl::exists; - let conn = app.db_read_prefer_primary()?; - let user_id = AuthCheck::only_cookie().check(&req, &conn)?.user_id(); - let follow = follow_target(&crate_name, &conn, user_id)?; + let conn = &mut *app.db_read_prefer_primary()?; + let user_id = AuthCheck::only_cookie().check(&req, conn)?.user_id(); + let follow = follow_target(&crate_name, conn, user_id)?; let following = - diesel::select(exists(follows::table.find(follow.id()))).get_result::(&*conn)?; + diesel::select(exists(follows::table.find(follow.id()))).get_result::(conn)?; Ok(Json(json!({ "following": following }))) }) diff --git a/src/controllers/krate/metadata.rs b/src/controllers/krate/metadata.rs index e0f85892c62..16d24590ca3 100644 --- a/src/controllers/krate/metadata.rs +++ b/src/controllers/krate/metadata.rs @@ -25,22 +25,24 @@ use crate::models::krate::ALL_COLUMNS; pub async fn summary(state: AppState) -> AppResult> { conduit_compat(move || { use crate::schema::crates::dsl::*; - use diesel::dsl::all; let config = &state.config; - let conn = state.db_read()?; - let num_crates: i64 = crates.count().get_result(&*conn)?; + let conn = &mut *state.db_read()?; + let num_crates: i64 = crates.count().get_result(conn)?; let num_downloads: i64 = metadata::table .select(metadata::total_downloads) - .get_result(&*conn)?; + .get_result(conn)?; - let encode_crates = |data: Vec<(Crate, Option)>| -> AppResult> { + fn encode_crates( + conn: &mut PgConnection, + data: Vec<(Crate, Option)>, + ) -> AppResult> { let recent_downloads = data.iter().map(|&(_, s)| s).collect::>(); let krates = data.into_iter().map(|(c, _)| c).collect::>(); - let versions: Vec = krates.versions().load(&*conn)?; + let versions: Vec = krates.versions().load(conn)?; versions .grouped_by(&krates) .into_iter() @@ -57,7 +59,7 @@ pub async fn summary(state: AppState) -> AppResult> { )) }) .collect() - }; + } let selection = (ALL_COLUMNS, recent_crate_downloads::downloads.nullable()); @@ -66,49 +68,49 @@ pub async fn summary(state: AppState) -> AppResult> { .order(created_at.desc()) .select(selection) .limit(10) - .load(&*conn)?; + .load(conn)?; let just_updated = crates .left_join(recent_crate_downloads::table) .filter(updated_at.ne(created_at)) .order(updated_at.desc()) .select(selection) .limit(10) - .load(&*conn)?; + .load(conn)?; let mut most_downloaded_query = crates.left_join(recent_crate_downloads::table).into_boxed(); if !config.excluded_crate_names.is_empty() { most_downloaded_query = - most_downloaded_query.filter(name.ne(all(&config.excluded_crate_names))); + most_downloaded_query.filter(name.ne_all(&config.excluded_crate_names)); } let most_downloaded = most_downloaded_query .then_order_by(downloads.desc()) .select(selection) .limit(10) - .load(&*conn)?; + .load(conn)?; let mut most_recently_downloaded_query = crates .inner_join(recent_crate_downloads::table) .into_boxed(); if !config.excluded_crate_names.is_empty() { most_recently_downloaded_query = - most_recently_downloaded_query.filter(name.ne(all(&config.excluded_crate_names))); + most_recently_downloaded_query.filter(name.ne_all(&config.excluded_crate_names)); } let most_recently_downloaded = most_recently_downloaded_query .then_order_by(recent_crate_downloads::downloads.desc()) .select(selection) .limit(10) - .load(&*conn)?; + .load(conn)?; let popular_keywords = keywords::table 
.order(keywords::crates_cnt.desc()) .limit(10) - .load(&*conn)? + .load(conn)? .into_iter() .map(Keyword::into) .collect::>(); - let popular_categories = Category::toplevel(&conn, "crates", 10, 0)? + let popular_categories = Category::toplevel(conn, "crates", 10, 0)? .into_iter() .map(Category::into) .collect::>(); @@ -116,10 +118,10 @@ pub async fn summary(state: AppState) -> AppResult> { Ok(Json(json!({ "num_downloads": num_downloads, "num_crates": num_crates, - "new_crates": encode_crates(new_crates)?, - "most_downloaded": encode_crates(most_downloaded)?, - "most_recently_downloaded": encode_crates(most_recently_downloaded)?, - "just_updated": encode_crates(just_updated)?, + "new_crates": encode_crates(conn, new_crates)?, + "most_downloaded": encode_crates(conn, most_downloaded)?, + "most_recently_downloaded": encode_crates(conn, most_recently_downloaded)?, + "just_updated": encode_crates(conn, just_updated)?, "popular_keywords": popular_keywords, "popular_categories": popular_categories, }))) @@ -137,15 +139,15 @@ pub async fn show(app: AppState, Path(name): Path, req: Parts) -> AppRes .transpose()? .unwrap_or_default(); - let conn = app.db_read()?; - let krate: Crate = Crate::by_name(&name).first(&*conn)?; + let conn = &mut *app.db_read()?; + let krate: Crate = Crate::by_name(&name).first(conn)?; let versions_publishers_and_audit_actions = if include.versions { let mut versions_and_publishers: Vec<(Version, Option)> = krate .all_versions() .left_outer_join(users::table) .select((versions::all_columns, users::all_columns.nullable())) - .load(&*conn)?; + .load(conn)?; versions_and_publishers.sort_by_cached_key(|(version, _)| { Reverse(semver::Version::parse(&version.num).ok()) }); @@ -158,7 +160,7 @@ pub async fn show(app: AppState, Path(name): Path, req: Parts) -> AppRes Some( versions_and_publishers .into_iter() - .zip(VersionOwnerAction::for_versions(&conn, &versions)?.into_iter()) + .zip(VersionOwnerAction::for_versions(conn, &versions)?.into_iter()) .map(|((v, pb), aas)| (v, pb, aas)) .collect::>(), ) @@ -174,7 +176,7 @@ pub async fn show(app: AppState, Path(name): Path, req: Parts) -> AppRes CrateKeyword::belonging_to(&krate) .inner_join(keywords::table) .select(keywords::all_columns) - .load(&*conn)?, + .load(conn)?, ) } else { None @@ -184,7 +186,7 @@ pub async fn show(app: AppState, Path(name): Path, req: Parts) -> AppRes CrateCategory::belonging_to(&krate) .inner_join(categories::table) .select(categories::all_columns) - .load(&*conn)?, + .load(conn)?, ) } else { None @@ -192,7 +194,7 @@ pub async fn show(app: AppState, Path(name): Path, req: Parts) -> AppRes let recent_downloads = if include.downloads { RecentCrateDownloads::belonging_to(&krate) .select(recent_crate_downloads::downloads) - .get_result(&*conn) + .get_result(conn) .optional()? } else { None @@ -201,7 +203,7 @@ pub async fn show(app: AppState, Path(name): Path, req: Parts) -> AppRes let badges = if include.badges { Some(vec![]) } else { None }; let top_versions = if include.versions { - Some(krate.top_versions(&conn)?) + Some(krate.top_versions(conn)?) 
} else { None }; @@ -322,13 +324,13 @@ pub async fn readme( // this information already, but ember is definitely requesting it pub async fn versions(state: AppState, Path(crate_name): Path) -> AppResult> { conduit_compat(move || { - let conn = state.db_read()?; - let krate: Crate = Crate::by_name(&crate_name).first(&*conn)?; + let conn = &mut *state.db_read()?; + let krate: Crate = Crate::by_name(&crate_name).first(conn)?; let mut versions_and_publishers: Vec<(Version, Option)> = krate .all_versions() .left_outer_join(users::table) .select((versions::all_columns, users::all_columns.nullable())) - .load(&*conn)?; + .load(conn)?; versions_and_publishers .sort_by_cached_key(|(version, _)| Reverse(semver::Version::parse(&version.num).ok())); @@ -340,7 +342,7 @@ pub async fn versions(state: AppState, Path(crate_name): Path) -> AppRes .collect::>(); let versions = versions_and_publishers .into_iter() - .zip(VersionOwnerAction::for_versions(&conn, &versions)?.into_iter()) + .zip(VersionOwnerAction::for_versions(conn, &versions)?.into_iter()) .map(|((v, pb), aas)| EncodableVersion::from(v, &crate_name, pb, aas)) .collect::>(); @@ -356,12 +358,10 @@ pub async fn reverse_dependencies( req: Parts, ) -> AppResult> { conduit_compat(move || { - use diesel::dsl::any; - let pagination_options = PaginationOptions::builder().gather(&req)?; - let conn = app.db_read()?; - let krate: Crate = Crate::by_name(&name).first(&*conn)?; - let (rev_deps, total) = krate.reverse_dependencies(&conn, pagination_options)?; + let conn = &mut *app.db_read()?; + let krate: Crate = Crate::by_name(&name).first(conn)?; + let (rev_deps, total) = krate.reverse_dependencies(conn, pagination_options)?; let rev_deps: Vec<_> = rev_deps .into_iter() .map(|dep| EncodableDependency::from_reverse_dep(dep, &krate.name)) @@ -370,7 +370,7 @@ pub async fn reverse_dependencies( let version_ids: Vec = rev_deps.iter().map(|dep| dep.version_id).collect(); let versions_and_publishers: Vec<(Version, String, Option)> = versions::table - .filter(versions::id.eq(any(version_ids))) + .filter(versions::id.eq_any(version_ids)) .inner_join(crates::table) .left_outer_join(users::table) .select(( @@ -378,7 +378,7 @@ pub async fn reverse_dependencies( crates::name, users::all_columns.nullable(), )) - .load(&*conn)?; + .load(conn)?; let versions = versions_and_publishers .iter() .map(|(v, _, _)| v) @@ -386,7 +386,7 @@ pub async fn reverse_dependencies( .collect::>(); let versions = versions_and_publishers .into_iter() - .zip(VersionOwnerAction::for_versions(&conn, &versions)?.into_iter()) + .zip(VersionOwnerAction::for_versions(conn, &versions)?.into_iter()) .map(|((version, krate_name, published_by), actions)| { EncodableVersion::from(version, &krate_name, published_by, actions) }) diff --git a/src/controllers/krate/owners.rs b/src/controllers/krate/owners.rs index dd03a84eaed..086d381e733 100644 --- a/src/controllers/krate/owners.rs +++ b/src/controllers/krate/owners.rs @@ -11,10 +11,10 @@ use http::Request; /// Handles the `GET /crates/:crate_id/owners` route. pub async fn owners(state: AppState, Path(crate_name): Path) -> AppResult> { conduit_compat(move || { - let conn = state.db_read()?; - let krate: Crate = Crate::by_name(&crate_name).first(&*conn)?; + let conn = &mut *state.db_read()?; + let krate: Crate = Crate::by_name(&crate_name).first(conn)?; let owners = krate - .owners(&conn)? + .owners(conn)? 
.into_iter() .map(Owner::into) .collect::>(); @@ -27,9 +27,9 @@ pub async fn owners(state: AppState, Path(crate_name): Path) -> AppResul /// Handles the `GET /crates/:crate_id/owner_team` route. pub async fn owner_team(state: AppState, Path(crate_name): Path) -> AppResult> { conduit_compat(move || { - let conn = state.db_read()?; - let krate: Crate = Crate::by_name(&crate_name).first(&*conn)?; - let owners = Team::owning(&krate, &conn)? + let conn = &mut *state.db_read()?; + let krate: Crate = Crate::by_name(&crate_name).first(conn)?; + let owners = Team::owning(&krate, conn)? .into_iter() .map(Owner::into) .collect::>(); @@ -42,9 +42,9 @@ pub async fn owner_team(state: AppState, Path(crate_name): Path) -> AppR /// Handles the `GET /crates/:crate_id/owner_user` route. pub async fn owner_user(state: AppState, Path(crate_name): Path) -> AppResult> { conduit_compat(move || { - let conn = state.db_read()?; - let krate: Crate = Crate::by_name(&crate_name).first(&*conn)?; - let owners = User::owning(&krate, &conn)? + let conn = &mut *state.db_read()?; + let krate: Crate = Crate::by_name(&crate_name).first(conn)?; + let owners = User::owning(&krate, conn)? .into_iter() .map(Owner::into) .collect::>(); @@ -102,17 +102,17 @@ fn modify_owners( ) -> AppResult> { let logins = parse_owners_request(req)?; - let conn = app.db_write()?; + let conn = &mut *app.db_write()?; let auth = AuthCheck::default() .with_endpoint_scope(EndpointScope::ChangeOwners) .for_crate(crate_name) - .check(req, &conn)?; + .check(req, conn)?; let user = auth.user(); - conn.transaction(|| { - let krate: Crate = Crate::by_name(crate_name).first(&*conn)?; - let owners = krate.owners(&conn)?; + conn.transaction(|conn| { + let krate: Crate = Crate::by_name(crate_name).first(conn)?; + let owners = krate.owners(conn)?; match user.rights(app, &owners)? { Rights::Full => {} @@ -135,15 +135,15 @@ fn modify_owners( if owners.iter().any(login_test) { return Err(cargo_err(&format_args!("`{login}` is already an owner"))); } - let msg = krate.owner_add(app, &conn, user, login)?; + let msg = krate.owner_add(app, conn, user, login)?; msgs.push(msg); } msgs.join(",") } else { for login in &logins { - krate.owner_remove(app, &conn, user, login)?; + krate.owner_remove(app, conn, user, login)?; } - if User::owning(&krate, &conn)?.is_empty() { + if User::owning(&krate, conn)?.is_empty() { return Err(cargo_err( "cannot remove all individual owners of a crate. 
\ Team member don't have permission to modify owners, so \ diff --git a/src/controllers/krate/publish.rs b/src/controllers/krate/publish.rs index a439493a438..d2eaefa4e95 100644 --- a/src/controllers/krate/publish.rs +++ b/src/controllers/krate/publish.rs @@ -76,13 +76,13 @@ pub async fn publish(app: AppState, req: BytesRequest) -> AppResult(&*conn) + .first::(conn) .optional()?; let endpoint_scope = match existing_crate { @@ -93,12 +93,12 @@ pub async fn publish(app: AppState, req: BytesRequest) -> AppResult AppResult AppResult AppResult= daily_version_limit as i64 { return Err(cargo_err( "You have published too many versions of this crate in the last 24 hours", @@ -202,10 +202,10 @@ pub async fn publish(app: AppState, req: BytesRequest) -> AppResult AppResult AppResult AppResult AppResult QueryResult { +fn count_versions_published_today(krate_id: i32, conn: &mut PgConnection) -> QueryResult { use crate::schema::versions::dsl::*; use diesel::dsl::{now, IntervalDsl}; @@ -346,7 +346,7 @@ pub fn missing_metadata_error_message(missing: &[&str]) -> String { } pub fn add_dependencies( - conn: &PgConnection, + conn: &mut PgConnection, deps: &[EncodableCrateDependency], target_version_id: i32, ) -> AppResult> { diff --git a/src/controllers/krate/search.rs b/src/controllers/krate/search.rs index 18d976f5396..9dc81747a06 100644 --- a/src/controllers/krate/search.rs +++ b/src/controllers/krate/search.rs @@ -115,7 +115,7 @@ pub async fn search(app: AppState, req: Parts) -> AppResult> { ); } - let conn = app.db_read()?; + let conn = &mut *app.db_read()?; if let Some(kws) = params.get("all_keywords") { // Calculating the total number of results with filters is not supported yet. @@ -190,7 +190,7 @@ pub async fn search(app: AppState, req: Parts) -> AppResult> { // Calculating the total number of results with filters is not supported yet. supports_seek = false; - let user_id = AuthCheck::default().check(&req, &conn)?.user_id(); + let user_id = AuthCheck::default().check(&req, conn)?.user_id(); query = query.filter( crates::id.eq_any( @@ -209,7 +209,7 @@ pub async fn search(app: AppState, req: Parts) -> AppResult> { .map(|(_, value)| value.to_string()) .collect(); - query = query.filter(crates::name.eq(any(ids))); + query = query.filter(crates::name.eq_any(ids)); } if !include_yanked { @@ -265,12 +265,12 @@ pub async fn search(app: AppState, req: Parts) -> AppResult> { let (total, next_page, prev_page, data, conn) = if supports_seek && !explicit_page { // Equivalent of: // `WHERE name > (SELECT name FROM crates WHERE id = $1) LIMIT $2` - query = query.limit(pagination.per_page as i64); + query = query.limit(pagination.per_page); if let Some(seek) = seek { let crate_name: String = crates::table .find(seek) .select(crates::name) - .get_result(&*conn)?; + .get_result(conn)?; query = query.filter(crates::name.gt(crate_name)); } @@ -280,9 +280,9 @@ pub async fn search(app: AppState, req: Parts) -> AppResult> { // // If this becomes a problem in the future the crates count could be denormalized, at least // for the filterless happy path. 
- let total: i64 = crates::table.count().get_result(&*conn)?; + let total: i64 = crates::table.count().get_result(conn)?; - let results: Vec<(Crate, bool, Option)> = query.load(&*conn)?; + let results: Vec<(Crate, bool, Option)> = query.load(conn)?; let next_page = if let Some(last) = results.last() { let mut params = IndexMap::new(); @@ -298,7 +298,7 @@ pub async fn search(app: AppState, req: Parts) -> AppResult> { (total, next_page, None, results, conn) } else { let query = query.pages_pagination(pagination); - let data: Paginated<(Crate, bool, Option)> = query.load(&conn)?; + let data: Paginated<(Crate, bool, Option)> = query.load(conn)?; ( data.total(), data.next_page_params().map(|p| req.query_with_params(p)), @@ -315,7 +315,7 @@ pub async fn search(app: AppState, req: Parts) -> AppResult> { .collect::>(); let crates = data.into_iter().map(|(c, _, _)| c).collect::>(); - let versions: Vec = crates.versions().load(&*conn)?; + let versions: Vec = crates.versions().load(conn)?; let versions = versions .grouped_by(&crates) .into_iter() @@ -350,4 +350,4 @@ pub async fn search(app: AppState, req: Parts) -> AppResult> { .await } -diesel_infix_operator!(Contains, "@>"); +diesel::infix_operator!(Contains, "@>"); diff --git a/src/controllers/metrics.rs b/src/controllers/metrics.rs index 5a9f96f859b..c4d83e4a09e 100644 --- a/src/controllers/metrics.rs +++ b/src/controllers/metrics.rs @@ -27,7 +27,7 @@ pub async fn prometheus( } let metrics = match kind.as_str() { - "service" => app.service_metrics.gather(&*app.db_read()?)?, + "service" => app.service_metrics.gather(&mut *app.db_read()?)?, "instance" => app.instance_metrics.gather(&app)?, _ => return Err(not_found()), }; diff --git a/src/controllers/team.rs b/src/controllers/team.rs index 88413bc0646..e8d040e1cd7 100644 --- a/src/controllers/team.rs +++ b/src/controllers/team.rs @@ -9,8 +9,8 @@ pub async fn show_team(state: AppState, Path(name): Path) -> AppResult AppResult> { conduit_compat(move || { - let conn = app.db_read_prefer_primary()?; - let auth = AuthCheck::only_cookie().check(&req, &conn)?; + let conn = &mut *app.db_read_prefer_primary()?; + let auth = AuthCheck::only_cookie().check(&req, conn)?; let user = auth.user(); let tokens: Vec = ApiToken::belonging_to(user) .filter(api_tokens::revoked.eq(false)) .order(api_tokens::created_at.desc()) - .load(&*conn)?; + .load(conn)?; Ok(Json(json!({ "api_tokens": tokens }))) }) @@ -51,9 +51,9 @@ pub async fn new(app: AppState, req: BytesRequest) -> AppResult> { return Err(bad_request("name must have a value")); } - let conn = app.db_write()?; + let conn = &mut *app.db_write()?; - let auth = AuthCheck::default().check(&req, &conn)?; + let auth = AuthCheck::default().check(&req, conn)?; if auth.api_token_id().is_some() { return Err(bad_request( "cannot use an API token to create a new API token", @@ -63,7 +63,7 @@ pub async fn new(app: AppState, req: BytesRequest) -> AppResult> { let user = auth.user(); let max_token_per_user = 500; - let count: i64 = ApiToken::belonging_to(user).count().get_result(&*conn)?; + let count: i64 = ApiToken::belonging_to(user).count().get_result(conn)?; if count >= max_token_per_user { return Err(bad_request(&format!( "maximum tokens per user is: {max_token_per_user}" @@ -95,7 +95,7 @@ pub async fn new(app: AppState, req: BytesRequest) -> AppResult> { .map_err(|_err| bad_request("invalid endpoint scope"))?; let api_token = - ApiToken::insert_with_scopes(&conn, user.id, name, crate_scopes, endpoint_scopes)?; + ApiToken::insert_with_scopes(conn, user.id, name, 
crate_scopes, endpoint_scopes)?; let api_token = EncodableApiTokenWithToken::from(api_token); Ok(Json(json!({ "api_token": api_token }))) @@ -106,12 +106,12 @@ pub async fn new(app: AppState, req: BytesRequest) -> AppResult> { /// Handles the `DELETE /me/tokens/:id` route. pub async fn revoke(app: AppState, Path(id): Path, req: Parts) -> AppResult> { conduit_compat(move || { - let conn = app.db_write()?; - let auth = AuthCheck::default().check(&req, &conn)?; + let conn = &mut *app.db_write()?; + let auth = AuthCheck::default().check(&req, conn)?; let user = auth.user(); diesel::update(ApiToken::belonging_to(user).find(id)) .set(api_tokens::revoked.eq(true)) - .execute(&*conn)?; + .execute(conn)?; Ok(Json(json!({}))) }) @@ -121,15 +121,15 @@ pub async fn revoke(app: AppState, Path(id): Path, req: Parts) -> AppResult /// Handles the `DELETE /tokens/current` route. pub async fn revoke_current(app: AppState, req: Parts) -> AppResult { conduit_compat(move || { - let conn = app.db_write()?; - let auth = AuthCheck::default().check(&req, &conn)?; + let conn = &mut *app.db_write()?; + let auth = AuthCheck::default().check(&req, conn)?; let api_token_id = auth .api_token_id() .ok_or_else(|| bad_request("token not provided"))?; diesel::update(api_tokens::table.filter(api_tokens::id.eq(api_token_id))) .set(api_tokens::revoked.eq(true)) - .execute(&*conn)?; + .execute(conn)?; Ok(StatusCode::NO_CONTENT.into_response()) }) diff --git a/src/controllers/user/me.rs b/src/controllers/user/me.rs index fa633a7ff3f..275eb1f655c 100644 --- a/src/controllers/user/me.rs +++ b/src/controllers/user/me.rs @@ -15,8 +15,8 @@ use crate::views::{EncodableMe, EncodablePrivateUser, EncodableVersion, OwnedCra /// Handles the `GET /me` route. pub async fn me(app: AppState, req: Parts) -> AppResult> { conduit_compat(move || { - let conn = app.db_read_prefer_primary()?; - let user_id = AuthCheck::only_cookie().check(&req, &conn)?.user_id(); + let conn = &mut *app.db_read_prefer_primary()?; + let user_id = AuthCheck::only_cookie().check(&req, conn)?.user_id(); let (user, verified, email, verification_sent): (User, Option, Option, bool) = users::table @@ -28,14 +28,14 @@ pub async fn me(app: AppState, req: Parts) -> AppResult> { emails::email.nullable(), emails::token_generated_at.nullable().is_not_null(), )) - .first(&*conn)?; + .first(conn)?; let owned_crates = CrateOwner::by_owner_kind(OwnerKind::User) .inner_join(crates::table) .filter(crate_owners::owner_id.eq(user_id)) .select((crates::id, crates::name, crate_owners::email_notifications)) .order(crates::name.asc()) - .load(&*conn)? + .load(conn)? .into_iter() .map(|(id, name, email_notifications)| OwnedCrate { id, @@ -57,17 +57,15 @@ pub async fn me(app: AppState, req: Parts) -> AppResult> { /// Handles the `GET /me/updates` route. 
pub async fn updates(app: AppState, req: Parts) -> AppResult> { conduit_compat(move || { - use diesel::dsl::any; - - let conn = app.db_read_prefer_primary()?; - let auth = AuthCheck::only_cookie().check(&req, &conn)?; + let conn = &mut app.db_read_prefer_primary()?; + let auth = AuthCheck::only_cookie().check(&req, conn)?; let user = auth.user(); let followed_crates = Follow::belonging_to(user).select(follows::crate_id); let query = versions::table .inner_join(crates::table) .left_outer_join(users::table) - .filter(crates::id.eq(any(followed_crates))) + .filter(crates::id.eq_any(followed_crates)) .order(versions::created_at.desc()) .select(( versions::all_columns, @@ -75,12 +73,12 @@ pub async fn updates(app: AppState, req: Parts) -> AppResult> { users::all_columns.nullable(), )) .pages_pagination(PaginationOptions::builder().gather(&req)?); - let data: Paginated<(Version, String, Option)> = query.load(&conn)?; + let data: Paginated<(Version, String, Option)> = query.load(conn)?; let more = data.next_page_params().is_some(); let versions = data.iter().map(|(v, _, _)| v).cloned().collect::>(); let data = data .into_iter() - .zip(VersionOwnerAction::for_versions(&conn, &versions)?.into_iter()) + .zip(VersionOwnerAction::for_versions(conn, &versions)?.into_iter()) .map(|((v, cn, pb), voas)| (v, cn, pb, voas)); let versions = data @@ -109,9 +107,9 @@ pub async fn update_user( use diesel::insert_into; let state = app.clone(); - let conn = state.db_write()?; + let conn = &mut state.db_write()?; - let auth = AuthCheck::default().check(&req, &conn)?; + let auth = AuthCheck::default().check(&req, conn)?; let user = auth.user(); // need to check if current user matches user to be updated @@ -141,7 +139,7 @@ pub async fn update_user( return Err(bad_request("empty email rejected")); } - conn.transaction::<_, BoxedAppError, _>(|| { + conn.transaction::<_, BoxedAppError, _>(|conn| { let new_email = NewEmail { user_id: user.id, email: user_email, @@ -153,7 +151,7 @@ pub async fn update_user( .do_update() .set(&new_email) .returning(emails::token) - .get_result(&*conn) + .get_result(conn) .map_err(|_| server_error("Error in creating token"))?; // This swallows any errors that occur while attempting to send the email. 
Some users have @@ -177,11 +175,11 @@ pub async fn confirm_user_email(state: AppState, Path(token): Path) -> A conduit_compat(move || { use diesel::update; - let conn = state.db_write()?; + let conn = &mut *state.db_write()?; let updated_rows = update(emails::table.filter(emails::token.eq(&token))) .set(emails::verified.eq(true)) - .execute(&*conn)?; + .execute(conn)?; if updated_rows == 0 { return Err(bad_request("Email belonging to token not found.")); @@ -202,9 +200,9 @@ pub async fn regenerate_token_and_send( use diesel::dsl::sql; use diesel::update; - let conn = state.db_write()?; + let conn = &mut state.db_write()?; - let auth = AuthCheck::default().check(&req, &conn)?; + let auth = AuthCheck::default().check(&req, conn)?; let user = auth.user(); // need to check if current user matches user to be updated @@ -212,10 +210,10 @@ pub async fn regenerate_token_and_send( return Err(bad_request("current user does not match requested user")); } - conn.transaction(|| { + conn.transaction(|conn| { let email: Email = update(Email::belonging_to(user)) .set(emails::token.eq(sql("DEFAULT"))) - .get_result(&*conn) + .get_result(conn) .map_err(|_| bad_request("Email could not be found"))?; state @@ -247,14 +245,14 @@ pub async fn update_email_notifications(app: AppState, req: BytesRequest) -> App .map(|c| (c.id, c.email_notifications)) .collect(); - let conn = app.db_write()?; - let user_id = AuthCheck::default().check(&req, &conn)?.user_id(); + let conn = &mut *app.db_write()?; + let user_id = AuthCheck::default().check(&req, conn)?.user_id(); // Build inserts from existing crates belonging to the current user let to_insert = CrateOwner::by_owner_kind(OwnerKind::User) .filter(owner_id.eq(user_id)) .select((crate_id, owner_id, owner_kind, email_notifications)) - .load(&*conn)? + .load(conn)? .into_iter() // Remove records whose `email_notifications` will not change from their current value .map( @@ -276,7 +274,7 @@ pub async fn update_email_notifications(app: AppState, req: BytesRequest) -> App .on_conflict((crate_id, owner_id, owner_kind)) .do_update() .set(email_notifications.eq(excluded(email_notifications))) - .execute(&*conn)?; + .execute(conn)?; ok_true() }) diff --git a/src/controllers/user/other.rs b/src/controllers/user/other.rs index e9e99b07a04..1618e3f1d90 100644 --- a/src/controllers/user/other.rs +++ b/src/controllers/user/other.rs @@ -11,11 +11,11 @@ pub async fn show(state: AppState, Path(user_name): Path) -> AppResult) -> AppResult>(&*conn)? + .first::>(conn)? 
.unwrap_or(0); Ok(Json(json!({ "total_downloads": data }))) diff --git a/src/controllers/user/session.rs b/src/controllers/user/session.rs index 10a0d5b3659..96341fa5e02 100644 --- a/src/controllers/user/session.rs +++ b/src/controllers/user/session.rs @@ -101,7 +101,8 @@ pub async fn authorize( // Fetch the user info from GitHub using the access token we just got and create a user record let ghuser = app.github.current_user(token)?; - let user = save_user_to_database(&ghuser, token.secret(), &app.emails, &*app.db_write()?)?; + let user = + save_user_to_database(&ghuser, token.secret(), &app.emails, &mut *app.db_write()?)?; // Log in by setting a cookie and the middleware authentication session.insert("user_id".to_string(), user.id.to_string()); @@ -117,7 +118,7 @@ fn save_user_to_database( user: &GithubUser, access_token: &str, emails: &Emails, - conn: &PgConnection, + conn: &mut PgConnection, ) -> AppResult { NewUser::new( user.id, @@ -162,7 +163,7 @@ mod tests { #[test] fn gh_user_with_invalid_email_doesnt_fail() { let emails = Emails::new_in_memory(); - let conn = pg_connection(); + let conn = &mut pg_connection(); let gh_user = GithubUser { email: Some("String.Format(\"{0}.{1}@live.com\", FirstName, LastName)".into()), name: Some("My Name".into()), @@ -170,7 +171,7 @@ mod tests { id: -1, avatar_url: None, }; - let result = save_user_to_database(&gh_user, "arbitrary_token", &emails, &conn); + let result = save_user_to_database(&gh_user, "arbitrary_token", &emails, conn); assert!( result.is_ok(), diff --git a/src/controllers/version.rs b/src/controllers/version.rs index 714e17378f7..9e82804ed43 100644 --- a/src/controllers/version.rs +++ b/src/controllers/version.rs @@ -8,7 +8,7 @@ use super::prelude::*; use crate::models::{Crate, Version}; fn version_and_crate( - conn: &PgConnection, + conn: &mut PgConnection, crate_name: &str, semver: &str, ) -> AppResult<(Version, Crate)> { diff --git a/src/controllers/version/deprecated.rs b/src/controllers/version/deprecated.rs index 52d3520b7a8..56e3f1f3047 100644 --- a/src/controllers/version/deprecated.rs +++ b/src/controllers/version/deprecated.rs @@ -14,8 +14,7 @@ use crate::views::EncodableVersion; /// Handles the `GET /versions` route. pub async fn index(app: AppState, req: Parts) -> AppResult> { conduit_compat(move || { - use diesel::dsl::any; - let conn = app.db_read()?; + let conn = &mut *app.db_read()?; // Extract all ids requested. let query = url::form_urlencoded::parse(req.uri.query().unwrap_or("").as_bytes()); @@ -31,8 +30,8 @@ pub async fn index(app: AppState, req: Parts) -> AppResult> { crates::name, users::all_columns.nullable(), )) - .filter(versions::id.eq(any(ids))) - .load(&*conn)?; + .filter(versions::id.eq_any(ids)) + .load(conn)?; let versions = versions_and_publishers .iter() .map(|(v, _, _)| v) @@ -40,7 +39,7 @@ pub async fn index(app: AppState, req: Parts) -> AppResult> { .collect::>(); let versions = versions_and_publishers .into_iter() - .zip(VersionOwnerAction::for_versions(&conn, &versions)?.into_iter()) + .zip(VersionOwnerAction::for_versions(conn, &versions)?.into_iter()) .map(|((version, crate_name, published_by), actions)| { EncodableVersion::from(version, &crate_name, published_by, actions) }) @@ -56,7 +55,7 @@ pub async fn index(app: AppState, req: Parts) -> AppResult> { /// be returned by `krate::show`. 
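Two related rewrites recur through these handlers. `Connection::transaction` no longer lets the closure capture the connection: Diesel 2 passes it back in as the closure argument, so every `transaction(|| ...)` becomes `transaction(|conn| ...)`. And `diesel::dsl::any` is gone; both subqueries (as in `updates` earlier) and in-memory collections go through `ExpressionMethods::eq_any`. A combined sketch against a stand-in table:

use diesel::prelude::*;
use diesel::result::Error;

diesel::table! {
    versions (id) {
        id -> Integer,
        yanked -> Bool,
    }
}

fn yank_all(conn: &mut PgConnection, ids: Vec<i32>) -> Result<usize, Error> {
    // Diesel 2 hands the connection back into the closure instead of letting
    // it capture the outer binding, which keeps the single `&mut` borrow sound.
    conn.transaction(|conn| {
        // Diesel 1: versions::id.eq(any(ids)). Diesel 2: eq_any takes the
        // collection (or a subquery select) directly.
        diesel::update(versions::table.filter(versions::id.eq_any(ids)))
            .set(versions::yanked.eq(true))
            .execute(conn)
    })
}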
pub async fn show_by_id(state: AppState, Path(id): Path) -> AppResult> { conduit_compat(move || { - let conn = state.db_read()?; + let conn = &mut *state.db_read()?; let (version, krate, published_by): (Version, Crate, Option) = versions::table .find(id) .inner_join(crates::table) @@ -66,8 +65,8 @@ pub async fn show_by_id(state: AppState, Path(id): Path) -> AppResult(&**conn) + .first::<(i32, String)>(&mut *conn) })?; // The increment does not happen instantly, but it's deferred to be executed in a batch @@ -134,8 +134,8 @@ pub async fn downloads( return Err(cargo_err(&format_args!("invalid semver: {version}"))); } - let conn = app.db_read()?; - let (version, _) = version_and_crate(&conn, &crate_name, &version)?; + let conn = &mut *app.db_read()?; + let (version, _) = version_and_crate(conn, &crate_name, &version)?; let cutoff_end_date = req .query() @@ -147,7 +147,7 @@ pub async fn downloads( let downloads = VersionDownload::belonging_to(&version) .filter(version_downloads::date.between(cutoff_start_date, cutoff_end_date)) .order(version_downloads::date) - .load(&*conn)? + .load(conn)? .into_iter() .map(VersionDownload::into) .collect::>(); diff --git a/src/controllers/version/metadata.rs b/src/controllers/version/metadata.rs index 446dd43f187..26550ba8b3d 100644 --- a/src/controllers/version/metadata.rs +++ b/src/controllers/version/metadata.rs @@ -27,9 +27,9 @@ pub async fn dependencies( return Err(cargo_err(&format_args!("invalid semver: {version}"))); } - let conn = state.db_read()?; - let (version, _) = version_and_crate(&conn, &crate_name, &version)?; - let deps = version.dependencies(&conn)?; + let conn = &mut state.db_read()?; + let (version, _) = version_and_crate(conn, &crate_name, &version)?; + let deps = version.dependencies(conn)?; let deps = deps .into_iter() .map(|(dep, crate_name)| EncodableDependency::from_dep(dep, &crate_name)) @@ -64,10 +64,10 @@ pub async fn show( return Err(cargo_err(&format_args!("invalid semver: {version}"))); } - let conn = state.db_read()?; - let (version, krate) = version_and_crate(&conn, &crate_name, &version)?; - let published_by = version.published_by(&conn); - let actions = VersionOwnerAction::by_version(&conn, &version)?; + let conn = &mut state.db_read()?; + let (version, krate) = version_and_crate(conn, &crate_name, &version)?; + let published_by = version.published_by(conn); + let actions = VersionOwnerAction::by_version(conn, &version)?; let version = EncodableVersion::from(version, &krate.name, published_by, actions); Ok(Json(json!({ "version": version }))) diff --git a/src/controllers/version/yank.rs b/src/controllers/version/yank.rs index fdc7fa32a10..45313ee93a8 100644 --- a/src/controllers/version/yank.rs +++ b/src/controllers/version/yank.rs @@ -51,17 +51,17 @@ fn modify_yank( return Err(cargo_err(&format_args!("invalid semver: {version}"))); } - let conn = state.db_write()?; + let conn = &mut *state.db_write()?; let auth = AuthCheck::default() .with_endpoint_scope(EndpointScope::Yank) .for_crate(crate_name) - .check(req, &conn)?; + .check(req, conn)?; - let (version, krate) = version_and_crate(&conn, crate_name, version)?; + let (version, krate) = version_and_crate(conn, crate_name, version)?; let api_token_id = auth.api_token_id(); let user = auth.user(); - let owners = krate.owners(&conn)?; + let owners = krate.owners(conn)?; if user.rights(state, &owners)? 
< Rights::Publish { return Err(cargo_err("must already be an owner to yank or unyank")); @@ -74,7 +74,7 @@ fn modify_yank( diesel::update(&version) .set(versions::yanked.eq(yanked)) - .execute(&*conn)?; + .execute(conn)?; let action = if yanked { VersionAction::Yank @@ -82,9 +82,9 @@ fn modify_yank( VersionAction::Unyank }; - insert_version_owner_action(&conn, version.id, user.id, api_token_id, action)?; + insert_version_owner_action(conn, version.id, user.id, api_token_id, action)?; - worker::sync_yanked(krate.name, version.num).enqueue(&conn)?; + worker::sync_yanked(krate.name, version.num).enqueue(conn)?; ok_true() } diff --git a/src/db.rs b/src/db.rs index d040b786826..13c36cec9e3 100644 --- a/src/db.rs +++ b/src/db.rs @@ -2,7 +2,10 @@ use diesel::prelude::*; use diesel::r2d2::{self, ConnectionManager, CustomizeConnection}; use prometheus::Histogram; use std::sync::{Arc, Mutex, MutexGuard}; -use std::{ops::Deref, time::Duration}; +use std::{ + ops::{Deref, DerefMut}, + time::Duration, +}; use thiserror::Error; use url::Url; @@ -67,7 +70,7 @@ impl DieselPool { } pub(crate) fn new_test(config: &config::DatabasePools, url: &str) -> DieselPool { - let conn = PgConnection::establish(&connection_url(config, url)) + let mut conn = PgConnection::establish(&connection_url(config, url)) .expect("failed to establish connection"); conn.begin_test_transaction() .expect("failed to begin test transaction"); @@ -150,6 +153,15 @@ impl Deref for DieselPooledConn<'_> { } } +impl DerefMut for DieselPooledConn<'_> { + fn deref_mut(&mut self) -> &mut Self::Target { + match self { + DieselPooledConn::Pool(conn) => conn.deref_mut(), + DieselPooledConn::Test(conn) => conn.deref_mut(), + } + } +} + pub fn oneoff_connection_with_config( config: &config::DatabasePools, ) -> ConnectionResult { @@ -213,7 +225,7 @@ impl CustomizeConnection for ConnectionConfig { #[cfg(test)] pub(crate) fn test_conn() -> PgConnection { - let conn = PgConnection::establish(&crate::env("TEST_DATABASE_URL")).unwrap(); + let mut conn = PgConnection::establish(&crate::env("TEST_DATABASE_URL")).unwrap(); conn.begin_test_transaction().unwrap(); conn } diff --git a/src/downloads_counter.rs b/src/downloads_counter.rs index 21a9b03567b..ca2a3cdb16f 100644 --- a/src/downloads_counter.rs +++ b/src/downloads_counter.rs @@ -65,16 +65,16 @@ impl DownloadsCounter { } pub fn persist_all_shards(&self, app: &App) -> Result { - let conn = app.primary_database.get()?; - self.persist_all_shards_with_conn(&conn) + let conn = &mut app.primary_database.get()?; + self.persist_all_shards_with_conn(conn) } pub fn persist_next_shard(&self, app: &App) -> Result { - let conn = app.primary_database.get()?; - self.persist_next_shard_with_conn(&conn) + let conn = &mut app.primary_database.get()?; + self.persist_next_shard_with_conn(conn) } - fn persist_all_shards_with_conn(&self, conn: &PgConnection) -> Result { + fn persist_all_shards_with_conn(&self, conn: &mut PgConnection) -> Result { let mut stats = PersistStats::default(); for shard in self.inner.shards() { let shard = std::mem::take(&mut *shard.write()); @@ -84,7 +84,7 @@ impl DownloadsCounter { Ok(stats) } - fn persist_next_shard_with_conn(&self, conn: &PgConnection) -> Result { + fn persist_next_shard_with_conn(&self, conn: &mut PgConnection) -> Result { // Replace the next shard in the ring with an empty HashMap (clearing it), and return the // previous contents for processing. The fetch_add method wraps around on overflow, so it's // fine to keep incrementing it without resetting. 
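The `&mut *state.db_write()?` spelling in the controllers only compiles because of this `src/db.rs` hunk: `DieselPooledConn` gains a `DerefMut` impl to match its existing `Deref`. A simplified model of the wrapper (the real enum wraps the r2d2 and test-only connections):

use std::ops::{Deref, DerefMut};
use std::sync::MutexGuard;

use diesel::prelude::*;
use diesel::r2d2::{ConnectionManager, PooledConnection};

// Simplified stand-in for `DieselPooledConn`: one variant per connection source.
enum PooledConn<'a> {
    Pool(PooledConnection<ConnectionManager<PgConnection>>),
    Test(MutexGuard<'a, PgConnection>),
}

impl Deref for PooledConn<'_> {
    type Target = PgConnection;
    fn deref(&self) -> &PgConnection {
        match self {
            PooledConn::Pool(conn) => conn,
            PooledConn::Test(conn) => conn,
        }
    }
}

// The new impl: with mutable access through the wrapper, `&mut *guard`
// yields the `&mut PgConnection` every Diesel 2 query method expects.
impl DerefMut for PooledConn<'_> {
    fn deref_mut(&mut self) -> &mut PgConnection {
        match self {
            PooledConn::Pool(conn) => conn,
            PooledConn::Test(conn) => conn,
        }
    }
}

The `let mut conn = PgConnection::establish(...)` edits in `new_test` and `test_conn` have the same root cause: `begin_test_transaction` issues a `BEGIN`, so it too takes `&mut self` now.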
@@ -99,7 +99,7 @@ impl DownloadsCounter { fn persist_shard<'a, Iter: Iterator)>>( &self, - conn: &PgConnection, + conn: &mut PgConnection, shard: Iter, ) -> Result { use crate::schema::{version_downloads, versions}; @@ -252,12 +252,12 @@ mod tests { #[test] fn test_increment_and_persist_all() { let counter = DownloadsCounter::new(); - let conn = crate::db::test_conn(); - let mut state = State::new(&conn); + let conn = &mut crate::db::test_conn(); + let mut state = State::new(conn); - let v1 = state.new_version(&conn); - let v2 = state.new_version(&conn); - let v3 = state.new_version(&conn); + let v1 = state.new_version(conn); + let v2 = state.new_version(conn); + let v3 = state.new_version(conn); // Add 15 downloads between v1 and v2, and no downloads for v3. for _ in 0..10 { @@ -270,7 +270,7 @@ mod tests { // Persist everything to the database let stats = counter - .persist_all_shards_with_conn(&conn) + .persist_all_shards_with_conn(conn) .expect("failed to persist all shards"); // Ensure the stats are accurate @@ -285,24 +285,24 @@ mod tests { ); // Ensure the download counts in the database are what we expect. - state.assert_downloads_count(&conn, v1, 10); - state.assert_downloads_count(&conn, v2, 5); - state.assert_downloads_count(&conn, v3, 0); + state.assert_downloads_count(conn, v1, 10); + state.assert_downloads_count(conn, v2, 5); + state.assert_downloads_count(conn, v3, 0); } #[test] fn test_increment_and_persist_shard() { let counter = DownloadsCounter::new(); - let conn = crate::db::test_conn(); - let mut state = State::new(&conn); + let conn = &mut crate::db::test_conn(); + let mut state = State::new(conn); - let v1 = state.new_version(&conn); + let v1 = state.new_version(conn); let v1_shard = counter.inner.determine_map(&v1); // For this test to work we need the two versions to be stored in different shards. - let mut v2 = state.new_version(&conn); + let mut v2 = state.new_version(conn); while counter.inner.determine_map(&v2) == v1_shard { - v2 = state.new_version(&conn); + v2 = state.new_version(conn); } let v2_shard = counter.inner.determine_map(&v2); @@ -319,7 +319,7 @@ mod tests { let mut pending = 15; for shard in 0..counter.shards_count() { let stats = counter - .persist_next_shard_with_conn(&conn) + .persist_next_shard_with_conn(conn) .expect("failed to persist shard"); if shard == v1_shard { @@ -333,7 +333,7 @@ mod tests { pending_downloads: pending, } ); - state.assert_downloads_count(&conn, v1, 10); + state.assert_downloads_count(conn, v1, 10); } else if shard == v2_shard { pending -= 5; assert_eq!( @@ -345,7 +345,7 @@ mod tests { pending_downloads: pending, } ); - state.assert_downloads_count(&conn, v2, 5); + state.assert_downloads_count(conn, v2, 5); } else { assert_eq!( stats, @@ -361,8 +361,8 @@ mod tests { assert_eq!(pending, 0); // Finally ensure that the download counts in the database are what we expect. - state.assert_downloads_count(&conn, v1, 10); - state.assert_downloads_count(&conn, v2, 5); + state.assert_downloads_count(conn, v1, 10); + state.assert_downloads_count(conn, v2, 5); } #[test] @@ -384,10 +384,10 @@ mod tests { F: Fn(&DashMap, i32, i32) -> bool, { let counter = DownloadsCounter::new(); - let conn = crate::db::test_conn(); - let mut state = State::new(&conn); + let conn = &mut crate::db::test_conn(); + let mut state = State::new(conn); - let v1 = state.new_version(&conn); + let v1 = state.new_version(conn); // Generate the second version. It should **not** already be in the database. 
let mut v2 = v1 + 1; @@ -401,7 +401,7 @@ mod tests { // No error should happen when persisting. The missing versions should be ignored. let stats = counter - .persist_all_shards_with_conn(&conn) + .persist_all_shards_with_conn(conn) .expect("failed to persist download counts"); // The download should not be counted for version 2. @@ -414,8 +414,8 @@ mod tests { pending_downloads: 0, } ); - state.assert_downloads_count(&conn, v1, 1); - state.assert_downloads_count(&conn, v2, 0); + state.assert_downloads_count(conn, v1, 1); + state.assert_downloads_count(conn, v2, 0); } struct State { @@ -425,7 +425,7 @@ mod tests { } impl State { - fn new(conn: &PgConnection) -> Self { + fn new(conn: &mut PgConnection) -> Self { let user = NewUser { gh_id: 0, gh_login: "ghost", @@ -448,7 +448,7 @@ mod tests { } } - fn new_version(&mut self, conn: &PgConnection) -> i32 { + fn new_version(&mut self, conn: &mut PgConnection) -> i32 { let version = NewVersion::new( self.krate.id, &Version::parse(&format!("{}.0.0", self.next_version)).unwrap(), @@ -468,7 +468,7 @@ mod tests { version.id } - fn assert_downloads_count(&self, conn: &PgConnection, version: i32, expected: i64) { + fn assert_downloads_count(&self, conn: &mut PgConnection, version: i32, expected: i64) { use crate::schema::version_downloads::dsl::*; use diesel::dsl::*; diff --git a/src/lib.rs b/src/lib.rs index 0b243b9a40e..b5ecbe055cb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -17,8 +17,6 @@ extern crate derive_deref; #[macro_use] extern crate diesel; #[macro_use] -extern crate diesel_migrations; -#[macro_use] extern crate serde; #[macro_use] extern crate serde_json; diff --git a/src/metrics/service.rs b/src/metrics/service.rs index 11e1075547c..1a7d3f323bb 100644 --- a/src/metrics/service.rs +++ b/src/metrics/service.rs @@ -30,7 +30,7 @@ metrics! { } impl ServiceMetrics { - pub(crate) fn gather(&self, conn: &PgConnection) -> AppResult> { + pub(crate) fn gather(&self, conn: &mut PgConnection) -> AppResult> { self.crates_total .set(crates::table.select(count_star()).first(conn)?); self.versions_total diff --git a/src/models/action.rs b/src/models/action.rs index 0fba86a1909..583bb32e432 100644 --- a/src/models/action.rs +++ b/src/models/action.rs @@ -6,14 +6,13 @@ use diesel::{ serialize::{self, Output, ToSql}, sql_types::Integer, }; -use std::io::Write; use crate::models::{ApiToken, User, Version}; use crate::schema::*; #[derive(Debug, Clone, Copy, PartialEq, Eq, FromSqlRow, AsExpression)] #[repr(i32)] -#[sql_type = "Integer"] +#[diesel(sql_type = Integer)] pub enum VersionAction { Publish = 0, Yank = 1, @@ -39,7 +38,7 @@ impl From for String { } impl FromSql for VersionAction { - fn from_sql(bytes: Option<&[u8]>) -> deserialize::Result { + fn from_sql(bytes: diesel::pg::PgValue<'_>) -> deserialize::Result { match >::from_sql(bytes)? 
{ 0 => Ok(VersionAction::Publish), 1 => Ok(VersionAction::Yank), @@ -50,16 +49,16 @@ impl FromSql for VersionAction { } impl ToSql for VersionAction { - fn to_sql(&self, out: &mut Output<'_, W, Pg>) -> serialize::Result { - ToSql::::to_sql(&(*self as i32), out) + fn to_sql(&self, out: &mut Output<'_, '_, Pg>) -> serialize::Result { + ToSql::::to_sql(&(*self as i32), &mut out.reborrow()) } } #[derive(Debug, Clone, Copy, Queryable, Identifiable, Associations)] -#[belongs_to(Version)] -#[belongs_to(User, foreign_key = "user_id")] -#[belongs_to(ApiToken, foreign_key = "api_token_id")] -#[table_name = "version_owner_actions"] +#[diesel(belongs_to(Version))] +#[diesel(belongs_to(User, foreign_key = user_id))] +#[diesel(belongs_to(ApiToken, foreign_key = api_token_id))] +#[diesel(table_name = version_owner_actions)] pub struct VersionOwnerAction { pub id: i32, pub version_id: i32, @@ -70,11 +69,14 @@ pub struct VersionOwnerAction { } impl VersionOwnerAction { - pub fn all(conn: &PgConnection) -> QueryResult> { + pub fn all(conn: &mut PgConnection) -> QueryResult> { version_owner_actions::table.load(conn) } - pub fn by_version(conn: &PgConnection, version: &Version) -> QueryResult> { + pub fn by_version( + conn: &mut PgConnection, + version: &Version, + ) -> QueryResult> { use version_owner_actions::dsl::version_id; version_owner_actions::table @@ -85,7 +87,7 @@ impl VersionOwnerAction { } pub fn for_versions( - conn: &PgConnection, + conn: &mut PgConnection, versions: &[Version], ) -> QueryResult>> { Ok(Self::belonging_to(versions) @@ -97,7 +99,7 @@ impl VersionOwnerAction { } pub fn insert_version_owner_action( - conn: &PgConnection, + conn: &mut PgConnection, version_id_: i32, user_id_: i32, api_token_id_: Option, diff --git a/src/models/category.rs b/src/models/category.rs index 0d0a2780654..5a51d366d4b 100644 --- a/src/models/category.rs +++ b/src/models/category.rs @@ -5,7 +5,7 @@ use crate::models::Crate; use crate::schema::*; #[derive(Clone, Identifiable, Queryable, QueryableByName, Debug)] -#[table_name = "categories"] +#[diesel(table_name = categories)] pub struct Category { pub id: i32, pub category: String, @@ -17,22 +17,12 @@ pub struct Category { type WithSlug<'a> = diesel::dsl::Eq>; type BySlug<'a> = diesel::dsl::Filter>; -type WithSlugsCaseSensitive<'a> = diesel::dsl::Eq< - categories::slug, - diesel::pg::expression::array_comparison::Any< - diesel::expression::bound::Bound< - diesel::sql_types::Array, - &'a [&'a str], - >, - >, ->; -type BySlugsCaseSensitive<'a> = diesel::dsl::Filter>; #[derive(Associations, Insertable, Identifiable, Debug, Clone, Copy)] -#[belongs_to(Category)] -#[belongs_to(Crate)] -#[table_name = "crates_categories"] -#[primary_key(crate_id, category_id)] +#[diesel(belongs_to(Category))] +#[diesel(belongs_to(Crate))] +#[diesel(table_name = crates_categories)] +#[diesel(primary_key(crate_id, category_id))] pub struct CrateCategory { crate_id: i32, category_id: i32, @@ -47,22 +37,15 @@ impl Category { categories::table.filter(Self::with_slug(slug)) } - pub fn with_slugs_case_sensitive<'a>(slugs: &'a [&'a str]) -> WithSlugsCaseSensitive<'a> { - use diesel::dsl::any; - categories::slug.eq(any(slugs)) - } - - pub fn by_slugs_case_sensitive<'a>(slugs: &'a [&'a str]) -> BySlugsCaseSensitive<'a> { - categories::table.filter(Self::with_slugs_case_sensitive(slugs)) - } - pub fn update_crate( - conn: &PgConnection, + conn: &mut PgConnection, krate: &Crate, slugs: &[&str], ) -> QueryResult> { - conn.transaction(|| { - let categories: Vec = 
Category::by_slugs_case_sensitive(slugs).load(conn)?; + conn.transaction(|conn| { + let categories: Vec = categories::table + .filter(categories::slug.eq_any(slugs)) + .load(conn)?; let invalid_categories = slugs .iter() .cloned() @@ -85,7 +68,7 @@ impl Category { }) } - pub fn count_toplevel(conn: &PgConnection) -> QueryResult { + pub fn count_toplevel(conn: &mut PgConnection) -> QueryResult { use self::categories::dsl::*; categories @@ -95,7 +78,7 @@ impl Category { } pub fn toplevel( - conn: &PgConnection, + conn: &mut PgConnection, sort: &str, limit: i64, offset: i64, @@ -115,7 +98,7 @@ impl Category { .load(conn) } - pub fn subcategories(&self, conn: &PgConnection) -> QueryResult> { + pub fn subcategories(&self, conn: &mut PgConnection) -> QueryResult> { use diesel::sql_types::Text; sql_query(include_str!("../subcategories.sql")) @@ -127,7 +110,7 @@ impl Category { /// Returns categories as a Vector in order of traversal, not including this Category. /// The intention is to be able to have slugs or parent categories arrayed in order, to /// offer the frontend, for examples, slugs to create links to each parent category in turn. - pub fn parent_categories(&self, conn: &PgConnection) -> QueryResult> { + pub fn parent_categories(&self, conn: &mut PgConnection) -> QueryResult> { use diesel::sql_types::Text; sql_query(include_str!("../parent_categories.sql")) @@ -139,7 +122,7 @@ impl Category { /// Struct for inserting categories; only used in tests. Actual categories are inserted /// in src/boot/categories.rs. #[derive(Insertable, AsChangeset, Default, Debug)] -#[table_name = "categories"] +#[diesel(table_name = categories)] pub struct NewCategory<'a> { pub category: &'a str, pub slug: &'a str, @@ -148,7 +131,7 @@ pub struct NewCategory<'a> { impl<'a> NewCategory<'a> { /// Inserts the category into the database, or updates an existing one. 
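The `VersionAction` edits in `src/models/action.rs` above show the one genuinely new API shape in this upgrade: `FromSql` receives a backend-specific `PgValue<'_>` instead of `Option<&[u8]>` (NULLs are rejected before the impl is called, so `not_none!` disappears), and `ToSql` loses its `W: Write` type parameter, so a delegating impl must pass `&mut out.reborrow()` rather than `out` itself. A self-contained sketch of both halves for an integer-backed enum; the error message is illustrative:

use diesel::deserialize::{self, FromSql};
use diesel::pg::{Pg, PgValue};
use diesel::serialize::{self, Output, ToSql};
use diesel::sql_types::Integer;

#[derive(Debug, Clone, Copy)]
enum Action {
    Publish,
    Yank,
    Unyank,
}

impl FromSql<Integer, Pg> for Action {
    fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
        // Decode through the stock i32 impl, then map the discriminant.
        match <i32 as FromSql<Integer, Pg>>::from_sql(bytes)? {
            0 => Ok(Action::Publish),
            1 => Ok(Action::Yank),
            2 => Ok(Action::Unyank),
            n => Err(format!("unrecognized enum discriminant: {n}").into()),
        }
    }
}

impl ToSql<Integer, Pg> for Action {
    fn to_sql(&self, out: &mut Output<'_, '_, Pg>) -> serialize::Result {
        // `reborrow` lends the inner i32 serializer a shorter-lived view of
        // the same output buffer; handing over `out` itself no longer compiles.
        ToSql::<Integer, Pg>::to_sql(&(*self as i32), &mut out.reborrow())
    }
}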
- pub fn create_or_update(&self, conn: &PgConnection) -> QueryResult { + pub fn create_or_update(&self, conn: &mut PgConnection) -> QueryResult { use crate::schema::categories::dsl::*; insert_into(categories) @@ -167,7 +150,7 @@ mod tests { use diesel::connection::SimpleConnection; fn pg_connection() -> PgConnection { - let conn = pg_connection_no_transaction(); + let mut conn = pg_connection_no_transaction(); // These tests deadlock if run concurrently conn.batch_execute("BEGIN; LOCK categories IN ACCESS EXCLUSIVE MODE") .unwrap(); @@ -177,17 +160,17 @@ mod tests { #[test] fn category_toplevel_excludes_subcategories() { use self::categories::dsl::*; - let conn = pg_connection(); + let conn = &mut pg_connection(); insert_into(categories) .values(&vec![ (category.eq("Cat 2"), slug.eq("cat2")), (category.eq("Cat 1"), slug.eq("cat1")), (category.eq("Cat 1::sub"), slug.eq("cat1::sub")), ]) - .execute(&conn) + .execute(conn) .unwrap(); - let cats = Category::toplevel(&conn, "", 10, 0) + let cats = Category::toplevel(conn, "", 10, 0) .unwrap() .into_iter() .map(|c| c.category) @@ -199,17 +182,17 @@ mod tests { #[test] fn category_toplevel_orders_by_crates_cnt_when_sort_given() { use self::categories::dsl::*; - let conn = pg_connection(); + let conn = &mut pg_connection(); insert_into(categories) .values(&vec![ (category.eq("Cat 1"), slug.eq("cat1"), crates_cnt.eq(0)), (category.eq("Cat 2"), slug.eq("cat2"), crates_cnt.eq(2)), (category.eq("Cat 3"), slug.eq("cat3"), crates_cnt.eq(1)), ]) - .execute(&conn) + .execute(conn) .unwrap(); - let cats = Category::toplevel(&conn, "crates", 10, 0) + let cats = Category::toplevel(conn, "crates", 10, 0) .unwrap() .into_iter() .map(|c| c.category) @@ -225,16 +208,16 @@ mod tests { #[test] fn category_toplevel_applies_limit_and_offset() { use self::categories::dsl::*; - let conn = pg_connection(); + let conn = &mut pg_connection(); insert_into(categories) .values(&vec![ (category.eq("Cat 1"), slug.eq("cat1")), (category.eq("Cat 2"), slug.eq("cat2")), ]) - .execute(&conn) + .execute(conn) .unwrap(); - let cats = Category::toplevel(&conn, "", 1, 0) + let cats = Category::toplevel(conn, "", 1, 0) .unwrap() .into_iter() .map(|c| c.category) @@ -242,7 +225,7 @@ mod tests { let expected = vec!["Cat 1".to_string()]; assert_eq!(expected, cats); - let cats = Category::toplevel(&conn, "", 1, 1) + let cats = Category::toplevel(conn, "", 1, 1) .unwrap() .into_iter() .map(|c| c.category) @@ -254,7 +237,7 @@ mod tests { #[test] fn category_toplevel_includes_subcategories_in_crate_cnt() { use self::categories::dsl::*; - let conn = pg_connection(); + let conn = &mut pg_connection(); insert_into(categories) .values(&vec![ (category.eq("Cat 1"), slug.eq("cat1"), crates_cnt.eq(1)), @@ -276,10 +259,10 @@ mod tests { ), (category.eq("Cat 3"), slug.eq("cat3"), crates_cnt.eq(6)), ]) - .execute(&conn) + .execute(conn) .unwrap(); - let cats = Category::toplevel(&conn, "crates", 10, 0) + let cats = Category::toplevel(conn, "crates", 10, 0) .unwrap() .into_iter() .map(|c| (c.category, c.crates_cnt)) @@ -295,7 +278,7 @@ mod tests { #[test] fn category_toplevel_applies_limit_and_offset_after_grouping() { use self::categories::dsl::*; - let conn = pg_connection(); + let conn = &mut pg_connection(); insert_into(categories) .values(&vec![ (category.eq("Cat 1"), slug.eq("cat1"), crates_cnt.eq(1)), @@ -317,10 +300,10 @@ mod tests { ), (category.eq("Cat 3"), slug.eq("cat3"), crates_cnt.eq(6)), ]) - .execute(&conn) + .execute(conn) .unwrap(); - let cats = Category::toplevel(&conn, "crates", 2, 
0) + let cats = Category::toplevel(conn, "crates", 2, 0) .unwrap() .into_iter() .map(|c| (c.category, c.crates_cnt)) @@ -328,7 +311,7 @@ mod tests { let expected = vec![("Cat 2".to_string(), 12), ("Cat 3".to_string(), 6)]; assert_eq!(expected, cats); - let cats = Category::toplevel(&conn, "crates", 2, 1) + let cats = Category::toplevel(conn, "crates", 2, 1) .unwrap() .into_iter() .map(|c| (c.category, c.crates_cnt)) @@ -340,7 +323,7 @@ mod tests { #[test] fn category_parent_categories_includes_path_to_node_with_count() { use self::categories::dsl::*; - let conn = pg_connection(); + let conn = &mut pg_connection(); insert_into(categories) .values(&vec![ (category.eq("Cat 1"), slug.eq("cat1"), crates_cnt.eq(1)), @@ -372,12 +355,12 @@ mod tests { ), (category.eq("Cat 3"), slug.eq("cat3"), crates_cnt.eq(200)), ]) - .execute(&conn) + .execute(conn) .unwrap(); - let cat: Category = Category::by_slug("cat1::sub1").first(&conn).unwrap(); - let subcats = cat.subcategories(&conn).unwrap(); - let parents = cat.parent_categories(&conn).unwrap(); + let cat: Category = Category::by_slug("cat1::sub1").first(conn).unwrap(); + let subcats = cat.subcategories(conn).unwrap(); + let parents = cat.parent_categories(conn).unwrap(); assert_eq!(parents.len(), 1); assert_eq!(parents[0].slug, "cat1"); diff --git a/src/models/crate_owner_invitation.rs b/src/models/crate_owner_invitation.rs index ba0ff2174b4..847fbd4e8f2 100644 --- a/src/models/crate_owner_invitation.rs +++ b/src/models/crate_owner_invitation.rs @@ -14,7 +14,7 @@ pub enum NewCrateOwnerInvitationOutcome { /// The model representing a row in the `crate_owner_invitations` database table. #[derive(Clone, Debug, PartialEq, Eq, Identifiable, Queryable)] -#[primary_key(invited_user_id, crate_id)] +#[diesel(primary_key(invited_user_id, crate_id))] pub struct CrateOwnerInvitation { pub invited_user_id: i32, pub invited_by_user_id: i32, @@ -29,11 +29,11 @@ impl CrateOwnerInvitation { invited_user_id: i32, invited_by_user_id: i32, crate_id: i32, - conn: &PgConnection, + conn: &mut PgConnection, config: &config::Server, ) -> AppResult { #[derive(Insertable, Clone, Copy, Debug)] - #[table_name = "crate_owner_invitations"] + #[diesel(table_name = crate_owner_invitations)] struct NewRecord { invited_user_id: i32, invited_by_user_id: i32, @@ -43,7 +43,7 @@ impl CrateOwnerInvitation { // Before actually creating the invite, check if an expired invitation already exists // and delete it from the database. This allows obtaining a new invite if the old one // expired, instead of returning "already exists". - conn.transaction(|| -> AppResult<()> { + conn.transaction(|conn| -> AppResult<()> { // This does a SELECT FOR UPDATE + DELETE instead of a DELETE with a WHERE clause to // use the model's `is_expired` method, centralizing our expiration checking logic. let existing: Option = crate_owner_invitations::table @@ -81,19 +81,19 @@ impl CrateOwnerInvitation { }) } - pub fn find_by_id(user_id: i32, crate_id: i32, conn: &PgConnection) -> AppResult { + pub fn find_by_id(user_id: i32, crate_id: i32, conn: &mut PgConnection) -> AppResult { Ok(crate_owner_invitations::table .find((user_id, crate_id)) .first::(conn)?) } - pub fn find_by_token(token: &str, conn: &PgConnection) -> AppResult { + pub fn find_by_token(token: &str, conn: &mut PgConnection) -> AppResult { Ok(crate_owner_invitations::table .filter(crate_owner_invitations::token.eq(token)) .first::(conn)?) 
 }

-    pub fn accept(self, conn: &PgConnection, config: &config::Server) -> AppResult<()> {
+    pub fn accept(self, conn: &mut PgConnection, config: &config::Server) -> AppResult<()> {
         if self.is_expired(config) {
             let crate_name = crates::table
                 .find(self.crate_id)
@@ -102,7 +102,7 @@ impl CrateOwnerInvitation {
             return Err(Box::new(OwnershipInvitationExpired { crate_name }));
         }

-        conn.transaction(|| {
+        conn.transaction(|conn| {
             diesel::insert_into(crate_owners::table)
                 .values(&CrateOwner {
                     crate_id: self.crate_id,
@@ -122,7 +122,7 @@
         })
     }

-    pub fn decline(self, conn: &PgConnection) -> AppResult<()> {
+    pub fn decline(self, conn: &mut PgConnection) -> AppResult<()> {
         // The check to prevent declining expired invitations is *explicitly* missing. We do not
         // care if an expired invitation is declined, as that just removes the invitation from the
         // database.
diff --git a/src/models/dependency.rs b/src/models/dependency.rs
index b841031ba70..6e72d0ef99a 100644
--- a/src/models/dependency.rs
+++ b/src/models/dependency.rs
@@ -1,15 +1,15 @@
 use diesel::deserialize::{self, FromSql};
 use diesel::pg::Pg;
-use diesel::sql_types::Integer;
+use diesel::sql_types::{Integer, Text};

 use crate::models::{Crate, Version};
 use crate::schema::*;
 use cargo_registry_index::DependencyKind as IndexDependencyKind;

 #[derive(Identifiable, Associations, Debug, Queryable, QueryableByName)]
-#[belongs_to(Version)]
-#[belongs_to(Crate)]
-#[table_name = "dependencies"]
+#[diesel(belongs_to(Version))]
+#[diesel(belongs_to(Crate))]
+#[diesel(table_name = dependencies)]
 pub struct Dependency {
     pub id: i32,
     pub version_id: i32,
@@ -27,10 +27,10 @@ pub struct Dependency {
 pub struct ReverseDependency {
     #[diesel(embed)]
     pub dependency: Dependency,
-    #[sql_type = "::diesel::sql_types::Integer"]
+    #[diesel(sql_type = Integer)]
     pub crate_downloads: i32,
-    #[sql_type = "::diesel::sql_types::Text"]
-    #[column_name = "crate_name"]
+    #[diesel(sql_type = Text)]
+    #[diesel(column_name = crate_name)]
     pub name: String,
 }

@@ -55,7 +55,7 @@ impl From<DependencyKind> for IndexDependencyKind {
 }

 impl FromSql<Integer, Pg> for DependencyKind {
-    fn from_sql(bytes: Option<&[u8]>) -> deserialize::Result<Self> {
+    fn from_sql(bytes: diesel::pg::PgValue<'_>) -> deserialize::Result<Self> {
         match <i32 as FromSql<Integer, Pg>>::from_sql(bytes)?
{ 0 => Ok(DependencyKind::Normal), 1 => Ok(DependencyKind::Build), diff --git a/src/models/download.rs b/src/models/download.rs index 4b9ad49e300..f4659fa20fc 100644 --- a/src/models/download.rs +++ b/src/models/download.rs @@ -3,8 +3,8 @@ use crate::schema::version_downloads; use chrono::NaiveDate; #[derive(Queryable, Identifiable, Associations, Debug, Clone, Copy)] -#[belongs_to(Version)] -#[primary_key(version_id, date)] +#[diesel(belongs_to(Version))] +#[diesel(primary_key(version_id, date))] pub struct VersionDownload { pub version_id: i32, pub downloads: i32, diff --git a/src/models/email.rs b/src/models/email.rs index 00d9191aaff..4df12b24188 100644 --- a/src/models/email.rs +++ b/src/models/email.rs @@ -4,7 +4,7 @@ use crate::models::User; use crate::schema::emails; #[derive(Debug, Queryable, AsChangeset, Identifiable, Associations)] -#[belongs_to(User)] +#[diesel(belongs_to(User))] pub struct Email { pub id: i32, pub user_id: i32, @@ -15,7 +15,7 @@ pub struct Email { } #[derive(Debug, Insertable, AsChangeset)] -#[table_name = "emails"] +#[diesel(table_name = emails)] pub struct NewEmail<'a> { pub user_id: i32, pub email: &'a str, diff --git a/src/models/follow.rs b/src/models/follow.rs index ebe98e703e3..f06835ee361 100644 --- a/src/models/follow.rs +++ b/src/models/follow.rs @@ -2,9 +2,9 @@ use crate::models::User; use crate::schema::follows; #[derive(Insertable, Queryable, Identifiable, Associations, Clone, Copy, Debug)] -#[belongs_to(User)] -#[primary_key(user_id, crate_id)] -#[table_name = "follows"] +#[diesel(belongs_to(User))] +#[diesel(primary_key(user_id, crate_id))] +#[diesel(table_name = follows)] pub struct Follow { pub user_id: i32, pub crate_id: i32, diff --git a/src/models/helpers/with_count.rs b/src/models/helpers/with_count.rs index a63d7ca5701..ccb5c225867 100644 --- a/src/models/helpers/with_count.rs +++ b/src/models/helpers/with_count.rs @@ -1,8 +1,10 @@ +use diesel::sql_types::BigInt; + #[derive(QueryableByName, Queryable, Debug)] pub struct WithCount { #[diesel(embed)] pub(crate) record: T, - #[sql_type = "::diesel::sql_types::BigInt"] + #[diesel(sql_type = BigInt)] pub(crate) total: i64, } diff --git a/src/models/keyword.rs b/src/models/keyword.rs index e8f3767e46b..e64fbb87054 100644 --- a/src/models/keyword.rs +++ b/src/models/keyword.rs @@ -14,25 +14,26 @@ pub struct Keyword { } #[derive(Associations, Insertable, Identifiable, Debug, Clone, Copy)] -#[belongs_to(Keyword)] -#[belongs_to(Crate)] -#[table_name = "crates_keywords"] -#[primary_key(crate_id, keyword_id)] +#[diesel(belongs_to(Keyword))] +#[diesel(belongs_to(Crate))] +#[diesel(table_name = crates_keywords)] +#[diesel(primary_key(crate_id, keyword_id))] pub struct CrateKeyword { crate_id: i32, keyword_id: i32, } impl Keyword { - pub fn find_by_keyword(conn: &PgConnection, name: &str) -> QueryResult { + pub fn find_by_keyword(conn: &mut PgConnection, name: &str) -> QueryResult { keywords::table .filter(keywords::keyword.eq(lower(name))) .first(conn) } - pub fn find_or_create_all(conn: &PgConnection, names: &[&str]) -> QueryResult> { - use diesel::dsl::any; - + pub fn find_or_create_all( + conn: &mut PgConnection, + names: &[&str], + ) -> QueryResult> { let lowercase_names: Vec<_> = names.iter().map(|s| s.to_lowercase()).collect(); let new_keywords: Vec<_> = lowercase_names @@ -45,7 +46,7 @@ impl Keyword { .on_conflict_do_nothing() .execute(conn)?; keywords::table - .filter(keywords::keyword.eq(any(&lowercase_names))) + .filter(keywords::keyword.eq_any(&lowercase_names)) .load(conn) } @@ -59,8 +60,12 
@@ impl Keyword { && chars.all(|c| c.is_ascii_alphanumeric() || c == '_' || c == '-' || c == '+') } - pub fn update_crate(conn: &PgConnection, krate: &Crate, keywords: &[&str]) -> QueryResult<()> { - conn.transaction(|| { + pub fn update_crate( + conn: &mut PgConnection, + krate: &Crate, + keywords: &[&str], + ) -> QueryResult<()> { + conn.transaction(|conn| { let keywords = Keyword::find_or_create_all(conn, keywords)?; diesel::delete(CrateKeyword::belonging_to(krate)).execute(conn)?; let crate_keywords = keywords @@ -86,7 +91,7 @@ mod tests { fn pg_connection() -> PgConnection { let database_url = dotenv::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests"); - let conn = PgConnection::establish(&database_url).unwrap(); + let mut conn = PgConnection::establish(&database_url).unwrap(); // These tests deadlock if run concurrently conn.batch_execute("BEGIN;").unwrap(); conn @@ -94,16 +99,16 @@ mod tests { #[test] fn dont_associate_with_non_lowercased_keywords() { - let conn = pg_connection(); + let conn = &mut pg_connection(); // The code should be preventing lowercased keywords from existing, // but if one happens to sneak in there, don't associate crates with it. diesel::insert_into(keywords::table) .values(keywords::keyword.eq("NO")) - .execute(&conn) + .execute(conn) .unwrap(); - let associated = Keyword::find_or_create_all(&conn, &["no"]).unwrap(); + let associated = Keyword::find_or_create_all(conn, &["no"]).unwrap(); assert_eq!(associated.len(), 1); assert_eq!(associated.first().unwrap().keyword, "no"); } diff --git a/src/models/krate.rs b/src/models/krate.rs index 20dd387c74a..e4b1d540b60 100644 --- a/src/models/krate.rs +++ b/src/models/krate.rs @@ -20,16 +20,16 @@ use crate::schema::*; use crate::sql::canon_crate_name; #[derive(Debug, Queryable, Identifiable, Associations, Clone, Copy)] -#[belongs_to(Crate)] -#[primary_key(crate_id)] -#[table_name = "recent_crate_downloads"] +#[diesel(belongs_to(Crate))] +#[diesel(primary_key(crate_id))] +#[diesel(table_name = recent_crate_downloads)] pub struct RecentCrateDownloads { pub crate_id: i32, pub downloads: i32, } -#[derive(Debug, Clone, Queryable, Identifiable, Associations, AsChangeset, QueryableByName)] -#[table_name = "crates"] +#[derive(Debug, Clone, Queryable, Identifiable, AsChangeset, QueryableByName)] +#[diesel(table_name = crates)] pub struct Crate { pub id: i32, pub name: String, @@ -80,9 +80,9 @@ type ByName<'a> = diesel::dsl::Filter>; type ByExactName<'a> = diesel::dsl::Filter>; #[derive(Insertable, AsChangeset, Default, Debug)] -#[table_name = "crates"] -#[changeset_options(treat_none_as_null = "true")] -#[primary_key(name, max_upload_size)] // This is actually just to skip updating them +#[diesel(table_name = crates)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(name, max_upload_size))] // This is actually just to skip updating them pub struct NewCrate<'a> { pub name: &'a str, pub description: Option<&'a str>, @@ -96,7 +96,7 @@ pub struct NewCrate<'a> { impl<'a> NewCrate<'a> { pub fn create_or_update( self, - conn: &PgConnection, + conn: &mut PgConnection, uploader: i32, rate_limit: Option<&PublishRateLimit>, ) -> AppResult { @@ -105,7 +105,7 @@ impl<'a> NewCrate<'a> { self.validate()?; self.ensure_name_not_reserved(conn)?; - conn.transaction(|| { + conn.transaction(|conn| { // To avoid race conditions, we try to insert // first so we know whether to add an owner if let Some(krate) = self.save_new_crate(conn, uploader)? 
{ @@ -151,7 +151,7 @@ impl<'a> NewCrate<'a> { Ok(()) } - fn ensure_name_not_reserved(&self, conn: &PgConnection) -> AppResult<()> { + fn ensure_name_not_reserved(&self, conn: &mut PgConnection) -> AppResult<()> { use crate::schema::reserved_crate_names::dsl::*; use diesel::dsl::exists; use diesel::select; @@ -167,10 +167,10 @@ impl<'a> NewCrate<'a> { } } - fn save_new_crate(&self, conn: &PgConnection, user_id: i32) -> QueryResult> { + fn save_new_crate(&self, conn: &mut PgConnection, user_id: i32) -> QueryResult> { use crate::schema::crates::dsl::*; - conn.transaction(|| { + conn.transaction(|conn| { let maybe_inserted: Option = diesel::insert_into(crates) .values(self) .on_conflict_do_nothing() @@ -211,7 +211,7 @@ impl Crate { let wildcard_name = format!("%{name}%"); Box::new(canon_crate_name(crates::name).like(canon_crate_name(wildcard_name))) } else { - diesel_infix_operator!(MatchesWord, "%>"); + diesel::infix_operator!(MatchesWord, "%>"); Box::new(MatchesWord::new( canon_crate_name(crates::name), name.into_sql::(), @@ -236,7 +236,7 @@ impl Crate { crates::table.select(ALL_COLUMNS) } - pub fn find_version(&self, conn: &PgConnection, version: &str) -> AppResult { + pub fn find_version(&self, conn: &mut PgConnection, version: &str) -> AppResult { self.all_versions() .filter(versions::num.eq(version)) .first(conn) @@ -306,7 +306,7 @@ impl Crate { /// Return both the newest (most recently updated) and /// highest version (in semver order) for the current crate. - pub fn top_versions(&self, conn: &PgConnection) -> QueryResult { + pub fn top_versions(&self, conn: &mut PgConnection) -> QueryResult { use crate::schema::versions::dsl::*; Ok(TopVersions::from_date_version_pairs( @@ -314,7 +314,7 @@ impl Crate { )) } - pub fn owners(&self, conn: &PgConnection) -> QueryResult> { + pub fn owners(&self, conn: &mut PgConnection) -> QueryResult> { let users = CrateOwner::by_owner_kind(OwnerKind::User) .filter(crate_owners::crate_id.eq(self.id)) .inner_join(users::table) @@ -336,7 +336,7 @@ impl Crate { pub fn owner_add( &self, app: &App, - conn: &PgConnection, + conn: &mut PgConnection, req_user: &User, login: &str, ) -> AppResult { @@ -400,7 +400,7 @@ impl Crate { pub fn owner_remove( &self, app: &App, - conn: &PgConnection, + conn: &mut PgConnection, req_user: &User, login: &str, ) -> AppResult<()> { @@ -416,7 +416,7 @@ impl Crate { /// Returns (dependency, dependent crate name, dependent crate downloads) pub(crate) fn reverse_dependencies( &self, - conn: &PgConnection, + conn: &mut PgConnection, options: PaginationOptions, ) -> AppResult<(Vec, i64)> { use diesel::sql_query; @@ -426,8 +426,8 @@ impl Crate { let rows: Vec> = sql_query(include_str!("krate_reverse_dependencies.sql")) .bind::(self.id) - .bind::(i64::from(offset)) - .bind::(i64::from(options.per_page)) + .bind::(offset) + .bind::(options.per_page) .load(conn)?; Ok(rows.records_and_total()) diff --git a/src/models/owner.rs b/src/models/owner.rs index 69fda2544ab..cee9166a99f 100644 --- a/src/models/owner.rs +++ b/src/models/owner.rs @@ -9,11 +9,11 @@ use crate::schema::{crate_owners, users}; use crate::sql::lower; #[derive(Insertable, Associations, Identifiable, Debug, Clone, Copy)] -#[belongs_to(Crate)] -#[belongs_to(User, foreign_key = "owner_id")] -#[belongs_to(Team, foreign_key = "owner_id")] -#[table_name = "crate_owners"] -#[primary_key(crate_id, owner_id, owner_kind)] +#[diesel(belongs_to(Crate))] +#[diesel(belongs_to(User, foreign_key = owner_id))] +#[diesel(belongs_to(Team, foreign_key = owner_id))] +#[diesel(table_name = 
crate_owners)] +#[diesel(primary_key(crate_id, owner_id, owner_kind))] pub struct CrateOwner { pub crate_id: i32, pub owner_id: i32, @@ -60,7 +60,7 @@ impl Owner { /// sensitive. pub fn find_or_create_by_login( app: &App, - conn: &PgConnection, + conn: &mut PgConnection, req_user: &User, name: &str, ) -> AppResult { diff --git a/src/models/team.rs b/src/models/team.rs index 6707bb8a8b2..f7e117a0421 100644 --- a/src/models/team.rs +++ b/src/models/team.rs @@ -30,7 +30,7 @@ pub struct Team { } #[derive(Insertable, AsChangeset, Debug)] -#[table_name = "teams"] +#[diesel(table_name = teams)] pub struct NewTeam<'a> { pub login: &'a str, pub github_id: i32, @@ -56,7 +56,7 @@ impl<'a> NewTeam<'a> { } } - pub fn create_or_update(&self, conn: &PgConnection) -> QueryResult { + pub fn create_or_update(&self, conn: &mut PgConnection) -> QueryResult { use crate::schema::teams::dsl::*; use diesel::insert_into; @@ -77,7 +77,7 @@ impl Team { /// This function will panic if login contains less than 2 `:` characters. pub fn create_or_update( app: &App, - conn: &PgConnection, + conn: &mut PgConnection, login: &str, req_user: &User, ) -> AppResult { @@ -116,7 +116,7 @@ impl Team { /// convenience to avoid rebuilding it. fn create_or_update_github_team( app: &App, - conn: &PgConnection, + conn: &mut PgConnection, login: &str, org_name: &str, team_name: &str, @@ -183,7 +183,7 @@ impl Team { } } - pub fn owning(krate: &Crate, conn: &PgConnection) -> QueryResult> { + pub fn owning(krate: &Crate, conn: &mut PgConnection) -> QueryResult> { let base_query = CrateOwner::belonging_to(krate).filter(crate_owners::deleted.eq(false)); let teams = base_query .inner_join(teams::table) diff --git a/src/models/token.rs b/src/models/token.rs index e7094f8f911..6a1eacb5514 100644 --- a/src/models/token.rs +++ b/src/models/token.rs @@ -12,7 +12,7 @@ use crate::util::token::{SecureToken, SecureTokenKind}; /// The model representing a row in the `api_tokens` database table. #[derive(Clone, Debug, PartialEq, Eq, Identifiable, Queryable, Associations, Serialize)] -#[belongs_to(User)] +#[diesel(belongs_to(User))] pub struct ApiToken { pub id: i32, #[serde(skip)] @@ -36,12 +36,12 @@ pub struct ApiToken { impl ApiToken { /// Generates a new named API token for a user - pub fn insert(conn: &PgConnection, user_id: i32, name: &str) -> AppResult { + pub fn insert(conn: &mut PgConnection, user_id: i32, name: &str) -> AppResult { Self::insert_with_scopes(conn, user_id, name, None, None) } pub fn insert_with_scopes( - conn: &PgConnection, + conn: &mut PgConnection, user_id: i32, name: &str, crate_scopes: Option>, @@ -65,7 +65,7 @@ impl ApiToken { }) } - pub fn find_by_api_token(conn: &PgConnection, token_: &str) -> AppResult { + pub fn find_by_api_token(conn: &mut PgConnection, token_: &str) -> AppResult { use crate::schema::api_tokens::dsl::*; use diesel::{dsl::now, update}; @@ -78,7 +78,7 @@ impl ApiToken { // If the database is in read only mode, we can't update last_used_at. 
         // Try updating in a new transaction, if that fails, fall back to reading
-        conn.transaction(|| {
+        conn.transaction(|conn| {
             update(tokens)
                 .set(last_used_at.eq(now.nullable()))
                 .get_result(conn)
diff --git a/src/models/token/scopes.rs b/src/models/token/scopes.rs
index 7cceadb0b1d..270f0ea14f5 100644
--- a/src/models/token/scopes.rs
+++ b/src/models/token/scopes.rs
@@ -6,7 +6,7 @@ use diesel::sql_types::Text;
 use std::io::Write;

 #[derive(Clone, Copy, Debug, PartialEq, Eq, AsExpression)]
-#[sql_type = "Text"]
+#[diesel(sql_type = Text)]
 pub enum EndpointScope {
     PublishNew,
     PublishUpdate,
@@ -26,7 +26,7 @@ impl From<&EndpointScope> for &[u8] {
 }

 impl ToSql<Text, Pg> for EndpointScope {
-    fn to_sql<W: Write>(&self, out: &mut Output<'_, W, Pg>) -> serialize::Result {
+    fn to_sql(&self, out: &mut Output<'_, '_, Pg>) -> serialize::Result {
         out.write_all(self.into())?;
         Ok(IsNull::No)
     }
@@ -47,8 +47,9 @@ impl TryFrom<&[u8]> for EndpointScope {
 }

 impl FromSql<Text, Pg> for EndpointScope {
-    fn from_sql(bytes: Option<&[u8]>) -> deserialize::Result<Self> {
-        Ok(EndpointScope::try_from(not_none!(bytes))?)
+    fn from_sql(bytes: diesel::pg::PgValue<'_>) -> deserialize::Result<Self> {
+        let value = <String as FromSql<Text, Pg>>::from_sql(bytes)?;
+        Ok(EndpointScope::try_from(value.as_bytes())?)
     }
 }

@@ -82,15 +83,15 @@ impl TryFrom<String> for CrateScope {
 }

 impl FromSql<Text, Pg> for CrateScope {
-    fn from_sql(bytes: Option<&[u8]>) -> deserialize::Result<Self> {
+    fn from_sql(bytes: diesel::pg::PgValue<'_>) -> deserialize::Result<Self> {
         let value = <String as FromSql<Text, Pg>>::from_sql(bytes)?;
         Ok(CrateScope::try_from(value)?)
     }
 }

 impl ToSql<Text, Pg> for CrateScope {
-    fn to_sql<W: Write>(&self, out: &mut Output<'_, W, Pg>) -> serialize::Result {
-        <String as ToSql<Text, Pg>>::to_sql(&self.pattern, out)
+    fn to_sql(&self, out: &mut Output<'_, '_, Pg>) -> serialize::Result {
+        ToSql::<Text, Pg>::to_sql(&self.pattern, &mut out.reborrow())
     }
 }
diff --git a/src/models/user.rs b/src/models/user.rs
index ed4968be81b..0073c9d8dec 100644
--- a/src/models/user.rs
+++ b/src/models/user.rs
@@ -10,7 +10,7 @@ use crate::models::{ApiToken, Crate, CrateOwner, Email, NewEmail, Owner, OwnerKi
 use crate::schema::{crate_owners, emails, users};

 /// The model representing a row in the `users` database table.
-#[derive(Clone, Debug, PartialEq, Eq, Queryable, Identifiable, AsChangeset, Associations)]
+#[derive(Clone, Debug, PartialEq, Eq, Queryable, Identifiable, AsChangeset)]
 pub struct User {
     pub id: i32,
     pub gh_access_token: String,
@@ -24,7 +24,7 @@ pub struct User {

 /// Represents a new user record insertable to the `users` table
 #[derive(Insertable, Debug, Default)]
-#[table_name = "users"]
+#[diesel(table_name = users)]
 pub struct NewUser<'a> {
     pub gh_id: i32,
     pub gh_login: &'a str,
@@ -55,7 +55,7 @@ impl<'a> NewUser<'a> {
         &self,
         email: Option<&'a str>,
         emails: &Emails,
-        conn: &PgConnection,
+        conn: &mut PgConnection,
     ) -> QueryResult<User> {
         use crate::schema::users::dsl::*;
         use diesel::dsl::sql;
@@ -63,7 +63,7 @@
         use diesel::pg::upsert::excluded;
         use diesel::sql_types::Integer;

-        conn.transaction(|| {
+        conn.transaction(|conn| {
             let user: User = insert_into(users)
                 .values(self)
                 // We need the `WHERE gh_id > 0` condition here because `gh_id` set
@@ -110,18 +110,18 @@
 }

 impl User {
-    pub fn find(conn: &PgConnection, id: i32) -> QueryResult<User> {
+    pub fn find(conn: &mut PgConnection, id: i32) -> QueryResult<User> {
         users::table.find(id).first(conn)
     }

     /// Queries the database for a user with a certain `api_token` value.
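Field- and type-level attributes follow the same namespacing, which is what the `ReverseDependency`, `WithCount`, and `EndpointScope` edits above are doing: `#[sql_type = "::diesel::sql_types::Text"]` becomes `#[diesel(sql_type = Text)]` with an imported path, and `#[column_name = "..."]` loses its quotes too. A stand-in row type for a hand-written SQL query (names illustrative):

use diesel::prelude::*;
use diesel::sql_types::{BigInt, Text};

// `crate_name` and `total` mirror the column aliases the raw query produces.
#[derive(QueryableByName, Debug)]
pub struct ReverseDepRow {
    #[diesel(sql_type = Text)]
    #[diesel(column_name = crate_name)]
    pub name: String,
    #[diesel(sql_type = BigInt)]
    pub total: i64,
}

fn load_rows(conn: &mut PgConnection) -> QueryResult<Vec<ReverseDepRow>> {
    diesel::sql_query("SELECT name AS crate_name, COUNT(*) AS total FROM crates GROUP BY name")
        .load(conn)
}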
- pub fn find_by_api_token(conn: &PgConnection, token: &str) -> AppResult { + pub fn find_by_api_token(conn: &mut PgConnection, token: &str) -> AppResult { let api_token = ApiToken::find_by_api_token(conn, token)?; Ok(Self::find(conn, api_token.user_id)?) } - pub fn owning(krate: &Crate, conn: &PgConnection) -> QueryResult> { + pub fn owning(krate: &Crate, conn: &mut PgConnection) -> QueryResult> { let users = CrateOwner::by_owner_kind(OwnerKind::User) .inner_join(users::table) .select(users::all_columns) @@ -162,7 +162,7 @@ impl User { /// Queries the database for the verified emails /// belonging to a given user - pub fn verified_email(&self, conn: &PgConnection) -> QueryResult> { + pub fn verified_email(&self, conn: &mut PgConnection) -> QueryResult> { Email::belonging_to(self) .select(emails::email) .filter(emails::verified.eq(true)) @@ -171,7 +171,7 @@ impl User { } /// Queries for the email belonging to a particular user - pub fn email(&self, conn: &PgConnection) -> QueryResult> { + pub fn email(&self, conn: &mut PgConnection) -> QueryResult> { Email::belonging_to(self) .select(emails::email) .first(conn) diff --git a/src/models/version.rs b/src/models/version.rs index 55a2e5a9338..d493f304289 100644 --- a/src/models/version.rs +++ b/src/models/version.rs @@ -10,7 +10,7 @@ use crate::schema::*; // Queryable has a custom implementation below #[derive(Clone, Identifiable, Associations, Debug, Queryable, Deserialize, Serialize)] -#[belongs_to(Crate)] +#[diesel(belongs_to(Crate))] pub struct Version { pub id: i32, pub crate_id: i32, @@ -28,7 +28,7 @@ pub struct Version { } #[derive(Insertable, Debug)] -#[table_name = "versions"] +#[diesel(table_name = versions)] pub struct NewVersion { crate_id: i32, num: String, @@ -94,7 +94,7 @@ impl TopVersions { impl Version { /// Returns (dependency, crate dependency name) - pub fn dependencies(&self, conn: &PgConnection) -> QueryResult> { + pub fn dependencies(&self, conn: &mut PgConnection) -> QueryResult> { Dependency::belonging_to(self) .inner_join(crates::table) .select((dependencies::all_columns, crates::name)) @@ -102,7 +102,10 @@ impl Version { .load(conn) } - pub fn record_readme_rendering(version_id_: i32, conn: &PgConnection) -> QueryResult { + pub fn record_readme_rendering( + version_id_: i32, + conn: &mut PgConnection, + ) -> QueryResult { use crate::schema::readme_renderings::dsl::*; use diesel::dsl::now; @@ -116,7 +119,7 @@ impl Version { /// Gets the User who ran `cargo publish` for this version, if recorded. /// Not for use when you have a group of versions you need the publishers for. 
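Struct-level derive options across all of these model files follow a single renaming rule: options move into the `#[diesel(...)]` namespace and quoted values become bare paths, e.g. `#[belongs_to(User)]` to `#[diesel(belongs_to(User))]`, `#[table_name = "versions"]` to `#[diesel(table_name = versions)]`, `#[primary_key(a, b)]` to `#[diesel(primary_key(a, b))]`, and `#[changeset_options(treat_none_as_null = "true")]` to `#[diesel(treat_none_as_null = true)]`; the standalone `diesel_infix_operator!` macro is likewise namespaced to `diesel::infix_operator!`, as in `Crate::with_name` above. A stand-in pair of models showing the new spelling:

use diesel::prelude::*;

diesel::table! {
    users (id) {
        id -> Integer,
        gh_login -> Text,
    }
}

diesel::table! {
    emails (id) {
        id -> Integer,
        user_id -> Integer,
        email -> Text,
    }
}

#[derive(Queryable, Identifiable, Debug)]
#[diesel(table_name = users)]
pub struct User {
    pub id: i32,
    pub gh_login: String,
}

// Diesel 1 spelled these `#[belongs_to(User)]` and `#[table_name = "emails"]`.
#[derive(Queryable, Identifiable, Associations, Debug)]
#[diesel(belongs_to(User))]
#[diesel(table_name = emails)]
pub struct Email {
    pub id: i32,
    pub user_id: i32,
    pub email: String,
}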
- pub fn published_by(&self, conn: &PgConnection) -> Option { + pub fn published_by(&self, conn: &mut PgConnection) -> Option { match self.published_by { Some(pb) => users::table.find(pb).first(conn).ok(), None => None, @@ -155,12 +158,12 @@ impl NewVersion { Ok(new_version) } - pub fn save(&self, conn: &PgConnection, published_by_email: &str) -> AppResult { + pub fn save(&self, conn: &mut PgConnection, published_by_email: &str) -> AppResult { use crate::schema::versions::dsl::*; use diesel::dsl::exists; use diesel::{insert_into, select}; - conn.transaction(|| { + conn.transaction(|conn| { let already_uploaded = versions .filter(crate_id.eq(self.crate_id)) .filter(num.eq(&self.num)); diff --git a/src/publish_rate_limit.rs b/src/publish_rate_limit.rs index f6d03408663..d01f3628703 100644 --- a/src/publish_rate_limit.rs +++ b/src/publish_rate_limit.rs @@ -34,7 +34,7 @@ impl Default for PublishRateLimit { } #[derive(Queryable, Insertable, Debug, PartialEq, Clone, Copy)] -#[table_name = "publish_limit_buckets"] +#[diesel(table_name = publish_limit_buckets)] #[allow(dead_code)] // Most fields only read in tests struct Bucket { user_id: i32, @@ -43,7 +43,7 @@ struct Bucket { } impl PublishRateLimit { - pub fn check_rate_limit(&self, uploader: i32, conn: &PgConnection) -> AppResult<()> { + pub fn check_rate_limit(&self, uploader: i32, conn: &mut PgConnection) -> AppResult<()> { let bucket = self.take_token(uploader, Utc::now().naive_utc(), conn)?; if bucket.tokens >= 1 { Ok(()) @@ -66,7 +66,7 @@ impl PublishRateLimit { &self, uploader: i32, now: NaiveDateTime, - conn: &PgConnection, + conn: &mut PgConnection, ) -> QueryResult { use self::publish_limit_buckets::dsl::*; @@ -116,14 +116,14 @@ mod tests { #[test] fn take_token_with_no_bucket_creates_new_one() -> QueryResult<()> { - let conn = pg_connection(); + let conn = &mut pg_connection(); let now = now(); let rate = PublishRateLimit { rate: Duration::from_secs(1), burst: 10, }; - let bucket = rate.take_token(new_user(&conn, "user1")?, now, &conn)?; + let bucket = rate.take_token(new_user(conn, "user1")?, now, conn)?; let expected = Bucket { user_id: bucket.user_id, tokens: 10, @@ -135,7 +135,7 @@ mod tests { rate: Duration::from_millis(50), burst: 20, }; - let bucket = rate.take_token(new_user(&conn, "user2")?, now, &conn)?; + let bucket = rate.take_token(new_user(conn, "user2")?, now, conn)?; let expected = Bucket { user_id: bucket.user_id, tokens: 20, @@ -147,15 +147,15 @@ mod tests { #[test] fn take_token_with_existing_bucket_modifies_existing_bucket() -> QueryResult<()> { - let conn = pg_connection(); + let conn = &mut pg_connection(); let now = now(); let rate = PublishRateLimit { rate: Duration::from_secs(1), burst: 10, }; - let user_id = new_user_bucket(&conn, 5, now)?.user_id; - let bucket = rate.take_token(user_id, now, &conn)?; + let user_id = new_user_bucket(conn, 5, now)?.user_id; + let bucket = rate.take_token(user_id, now, conn)?; let expected = Bucket { user_id, tokens: 4, @@ -167,16 +167,16 @@ mod tests { #[test] fn take_token_after_delay_refills() -> QueryResult<()> { - let conn = pg_connection(); + let conn = &mut pg_connection(); let now = now(); let rate = PublishRateLimit { rate: Duration::from_secs(1), burst: 10, }; - let user_id = new_user_bucket(&conn, 5, now)?.user_id; + let user_id = new_user_bucket(conn, 5, now)?.user_id; let refill_time = now + chrono::Duration::seconds(2); - let bucket = rate.take_token(user_id, refill_time, &conn)?; + let bucket = rate.take_token(user_id, refill_time, conn)?; let expected = Bucket { 
user_id, tokens: 6, @@ -188,7 +188,7 @@ mod tests { #[test] fn refill_subsecond_rate() -> QueryResult<()> { - let conn = pg_connection(); + let conn = &mut pg_connection(); // Subsecond rates have floating point rounding issues, so use a known // timestamp that rounds fine let now = @@ -199,9 +199,9 @@ mod tests { rate: Duration::from_millis(100), burst: 10, }; - let user_id = new_user_bucket(&conn, 5, now)?.user_id; + let user_id = new_user_bucket(conn, 5, now)?.user_id; let refill_time = now + chrono::Duration::milliseconds(300); - let bucket = rate.take_token(user_id, refill_time, &conn)?; + let bucket = rate.take_token(user_id, refill_time, conn)?; let expected = Bucket { user_id, tokens: 7, @@ -213,15 +213,15 @@ mod tests { #[test] fn last_refill_always_advanced_by_multiple_of_rate() -> QueryResult<()> { - let conn = pg_connection(); + let conn = &mut pg_connection(); let now = now(); let rate = PublishRateLimit { rate: Duration::from_millis(100), burst: 10, }; - let user_id = new_user_bucket(&conn, 5, now)?.user_id; - let bucket = rate.take_token(user_id, now + chrono::Duration::milliseconds(250), &conn)?; + let user_id = new_user_bucket(conn, 5, now)?.user_id; + let bucket = rate.take_token(user_id, now + chrono::Duration::milliseconds(250), conn)?; let expected_refill_time = now + chrono::Duration::milliseconds(200); let expected = Bucket { user_id, @@ -234,15 +234,15 @@ mod tests { #[test] fn zero_tokens_returned_when_user_has_no_tokens_left() -> QueryResult<()> { - let conn = pg_connection(); + let conn = &mut pg_connection(); let now = now(); let rate = PublishRateLimit { rate: Duration::from_secs(1), burst: 10, }; - let user_id = new_user_bucket(&conn, 1, now)?.user_id; - let bucket = rate.take_token(user_id, now, &conn)?; + let user_id = new_user_bucket(conn, 1, now)?.user_id; + let bucket = rate.take_token(user_id, now, conn)?; let expected = Bucket { user_id, tokens: 0, @@ -250,23 +250,23 @@ mod tests { }; assert_eq!(expected, bucket); - let bucket = rate.take_token(user_id, now, &conn)?; + let bucket = rate.take_token(user_id, now, conn)?; assert_eq!(expected, bucket); Ok(()) } #[test] fn a_user_with_no_tokens_gets_a_token_after_exactly_rate() -> QueryResult<()> { - let conn = pg_connection(); + let conn = &mut pg_connection(); let now = now(); let rate = PublishRateLimit { rate: Duration::from_secs(1), burst: 10, }; - let user_id = new_user_bucket(&conn, 0, now)?.user_id; + let user_id = new_user_bucket(conn, 0, now)?.user_id; let refill_time = now + chrono::Duration::seconds(1); - let bucket = rate.take_token(user_id, refill_time, &conn)?; + let bucket = rate.take_token(user_id, refill_time, conn)?; let expected = Bucket { user_id, tokens: 1, @@ -279,16 +279,16 @@ mod tests { #[test] fn tokens_never_refill_past_burst() -> QueryResult<()> { - let conn = pg_connection(); + let conn = &mut pg_connection(); let now = now(); let rate = PublishRateLimit { rate: Duration::from_secs(1), burst: 10, }; - let user_id = new_user_bucket(&conn, 8, now)?.user_id; + let user_id = new_user_bucket(conn, 8, now)?.user_id; let refill_time = now + chrono::Duration::seconds(4); - let bucket = rate.take_token(user_id, refill_time, &conn)?; + let bucket = rate.take_token(user_id, refill_time, conn)?; let expected = Bucket { user_id, tokens: 10, @@ -301,25 +301,25 @@ mod tests { #[test] fn override_is_used_instead_of_global_burst_if_present() -> QueryResult<()> { - let conn = pg_connection(); + let conn = &mut pg_connection(); let now = now(); let rate = PublishRateLimit { rate: 
Duration::from_secs(1), burst: 10, }; - let user_id = new_user(&conn, "user1")?; - let other_user_id = new_user(&conn, "user2")?; + let user_id = new_user(conn, "user1")?; + let other_user_id = new_user(conn, "user2")?; diesel::insert_into(publish_rate_overrides::table) .values(( publish_rate_overrides::user_id.eq(user_id), publish_rate_overrides::burst.eq(20), )) - .execute(&conn)?; + .execute(conn)?; - let bucket = rate.take_token(user_id, now, &conn)?; - let other_bucket = rate.take_token(other_user_id, now, &conn)?; + let bucket = rate.take_token(user_id, now, conn)?; + let other_bucket = rate.take_token(other_user_id, now, conn)?; assert_eq!(20, bucket.tokens); assert_eq!(10, other_bucket.tokens); @@ -328,15 +328,15 @@ mod tests { #[test] fn overrides_can_expire() -> QueryResult<()> { - let conn = pg_connection(); + let conn = &mut pg_connection(); let now = now(); let rate = PublishRateLimit { rate: Duration::from_secs(1), burst: 10, }; - let user_id = new_user(&conn, "user1")?; - let other_user_id = new_user(&conn, "user2")?; + let user_id = new_user(conn, "user1")?; + let other_user_id = new_user(conn, "user2")?; diesel::insert_into(publish_rate_overrides::table) .values(( @@ -344,10 +344,10 @@ mod tests { publish_rate_overrides::burst.eq(20), publish_rate_overrides::expires_at.eq(now + chrono::Duration::days(30)), )) - .execute(&conn)?; + .execute(conn)?; - let bucket = rate.take_token(user_id, now, &conn)?; - let other_bucket = rate.take_token(other_user_id, now, &conn)?; + let bucket = rate.take_token(user_id, now, conn)?; + let other_bucket = rate.take_token(other_user_id, now, conn)?; assert_eq!(20, bucket.tokens); assert_eq!(10, other_bucket.tokens); @@ -356,10 +356,10 @@ mod tests { diesel::update(publish_rate_overrides::table) .set(publish_rate_overrides::expires_at.eq(now - chrono::Duration::days(30))) .filter(publish_rate_overrides::user_id.eq(user_id)) - .execute(&conn)?; + .execute(conn)?; - let bucket = rate.take_token(user_id, now, &conn)?; - let other_bucket = rate.take_token(other_user_id, now, &conn)?; + let bucket = rate.take_token(user_id, now, conn)?; + let other_bucket = rate.take_token(other_user_id, now, conn)?; // The number of tokens of user_id is 10 and not 9 because when the new burst limit is // lower than the amount of available tokens, the number of available tokens is reset to @@ -370,7 +370,7 @@ mod tests { Ok(()) } - fn new_user(conn: &PgConnection, gh_login: &str) -> QueryResult<i32> { + fn new_user(conn: &mut PgConnection, gh_login: &str) -> QueryResult<i32> { use crate::models::NewUser; let user = NewUser { @@ -382,7 +382,7 @@ mod tests { } fn new_user_bucket( - conn: &PgConnection, + conn: &mut PgConnection, tokens: i32, now: NaiveDateTime, ) -> QueryResult<Bucket> { diff --git a/src/schema.patch b/src/schema.patch index 1ba2baca8f9..2ce75b41f43 100644 --- a/src/schema.patch +++ b/src/schema.patch @@ -1,14 +1,35 @@ diff --git a/src/schema.rs b/src/schema.rs -index df884e4..18e08cd 100644 +index 80d3baaf0..c39c711e2 100644 --- a/src/schema.rs +++ b/src/schema.rs -@@ -1,3 +1,5 @@ -+#![allow(unused_imports)] -+ - table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; -@@ -171,12 +173,6 @@ +@@ -14,9 +14,7 @@ pub mod sql_types { + /// The `pg_catalog.tsvector` SQL type + /// + /// (Automatically generated by Diesel.) +- #[derive(diesel::sql_types::SqlType)] +- #[diesel(postgres_type(name = "tsvector", schema = "pg_catalog"))] +- pub struct Tsvector; ++ pub use diesel_full_text_search::Tsvector; + } + + diesel::table!
{ +@@ -71,13 +69,13 @@ diesel::table! { + /// Its SQL type is `Nullable<Array<Nullable<Text>>>`. + /// + /// (Automatically generated by Diesel.) +- crate_scopes -> Nullable<Array<Nullable<Text>>>, ++ crate_scopes -> Nullable<Array<Text>>, + /// The `endpoint_scopes` column of the `api_tokens` table. + /// + /// Its SQL type is `Nullable<Array<Nullable<Text>>>`. + /// + /// (Automatically generated by Diesel.) +- endpoint_scopes -> Nullable<Array<Nullable<Text>>>, ++ endpoint_scopes -> Nullable<Array<Text>>, + } + } + +@@ -195,12 +193,6 @@ diesel::table! { /// /// (Automatically generated by Diesel.) created_at -> Timestamp, @@ -21,10 +42,20 @@ index df884e4..18e08cd 100644 } } -@@ -678,6 +674,24 @@ +@@ -473,7 +465,7 @@ diesel::table! { + /// Its SQL type is `Array<Nullable<Text>>`. + /// + /// (Automatically generated by Diesel.) +- features -> Array<Nullable<Text>>, ++ features -> Array<Text>, + /// The `target` column of the `dependencies` table. + /// + /// Its SQL type is `Nullable<Text>`. +@@ -677,6 +669,24 @@ diesel::table! { + } } - table! { ++diesel::table! { + /// Representation of the `recent_crate_downloads` view. + /// + /// This data represents the downloads in the last 90 days. + /// It doesn't include downloads that were counted into `crate_downloads` + /// yet. + recent_crate_downloads (crate_id) { + /// The `crate_id` column of the `recent_crate_downloads` view. + /// + /// Its SQL type is `Integer`. + /// + /// (Automatically generated by Diesel.) + crate_id -> Integer, + /// The `downloads` column of the `recent_crate_downloads` table. + /// + /// Its SQL type is `BigInt`. + /// + /// (Automatically generated by Diesel.) + downloads -> BigInt, + } +} + -+table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - -@@ -1003,7 +1017,8 @@ - joinable!(badges -> crates (crate_id)); - joinable!(crate_owner_invitations -> crates (crate_id)); - joinable!(crate_owners -> crates (crate_id)); --joinable!(crate_owners -> users (created_by)); -+joinable!(crate_owners -> teams (owner_id)); -+joinable!(crate_owners -> users (owner_id)); - joinable!(crates_categories -> categories (category_id)); - joinable!(crates_categories -> crates (crate_id)); - joinable!(crates_keywords -> crates (crate_id)); -@@ -1016,6 +1031,7 @@ - joinable!(publish_limit_buckets -> users (user_id)); - joinable!(publish_rate_overrides -> users (user_id)); - joinable!(readme_renderings -> versions (version_id)); -+joinable!(recent_crate_downloads -> crates (crate_id)); - joinable!(version_downloads -> versions (version_id)); - joinable!(version_owner_actions -> api_tokens (api_token_id)); - joinable!(version_owner_actions -> users (user_id)); -@@ -1043,6 +1059,7 @@ + diesel::table! { + /// Representation of the `reserved_crate_names` table.
+ /// +@@ -983,7 +993,8 @@ diesel::joinable!(api_tokens -> users (user_id)); + diesel::joinable!(badges -> crates (crate_id)); + diesel::joinable!(crate_owner_invitations -> crates (crate_id)); + diesel::joinable!(crate_owners -> crates (crate_id)); +-diesel::joinable!(crate_owners -> users (created_by)); ++diesel::joinable!(crate_owners -> teams (owner_id)); ++diesel::joinable!(crate_owners -> users (owner_id)); + diesel::joinable!(crates_categories -> categories (category_id)); + diesel::joinable!(crates_categories -> crates (crate_id)); + diesel::joinable!(crates_keywords -> crates (crate_id)); +@@ -996,6 +1007,7 @@ diesel::joinable!(follows -> users (user_id)); + diesel::joinable!(publish_limit_buckets -> users (user_id)); + diesel::joinable!(publish_rate_overrides -> users (user_id)); + diesel::joinable!(readme_renderings -> versions (version_id)); ++diesel::joinable!(recent_crate_downloads -> crates (crate_id)); + diesel::joinable!(version_downloads -> versions (version_id)); + diesel::joinable!(version_owner_actions -> api_tokens (api_token_id)); + diesel::joinable!(version_owner_actions -> users (user_id)); +@@ -1022,6 +1034,7 @@ diesel::allow_tables_to_appear_in_same_query!( publish_limit_buckets, publish_rate_overrides, readme_renderings, diff --git a/src/schema.rs b/src/schema.rs index f2a8546aa7d..c39c711e29d 100644 --- a/src/schema.rs +++ b/src/schema.rs @@ -1,9 +1,23 @@ -#![allow(unused_imports)] +// @generated automatically by Diesel CLI. -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; +/// A module containing custom SQL type definitions +/// +/// (Automatically generated by Diesel.) +pub mod sql_types { + /// The `ltree` SQL type + /// + /// (Automatically generated by Diesel.) + #[derive(diesel::sql_types::SqlType)] + #[diesel(postgres_type(name = "ltree"))] + pub struct Ltree; + + /// The `pg_catalog.tsvector` SQL type + /// + /// (Automatically generated by Diesel.) + pub use diesel_full_text_search::Tsvector; +} +diesel::table! { /// Representation of the `api_tokens` table. /// /// (Automatically generated by Diesel.) @@ -52,23 +66,20 @@ table! { revoked -> Bool, /// The `crate_scopes` column of the `api_tokens` table. /// - /// Its SQL type is `Nullable<Array<Text>>`. + /// Its SQL type is `Nullable<Array<Nullable<Text>>>`. /// /// (Automatically generated by Diesel.) crate_scopes -> Nullable<Array<Text>>, /// The `endpoint_scopes` column of the `api_tokens` table. /// - /// Its SQL type is `Nullable<Array<Text>>`. + /// Its SQL type is `Nullable<Array<Nullable<Text>>>`. /// /// (Automatically generated by Diesel.) endpoint_scopes -> Nullable<Array<Text>>, } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `background_jobs` table. /// /// (Automatically generated by Diesel.) @@ -112,10 +123,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `badges` table. /// /// (Automatically generated by Diesel.) @@ -141,9 +149,9 @@ table! { } } -table! { +diesel::table! { use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; + use super::sql_types::Ltree; /// Representation of the `categories` table. /// @@ -188,10 +196,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `crate_owner_invitations` table. /// /// (Automatically generated by Diesel.) @@ -235,10 +240,7 @@ table! { } } -table!
{ - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `crate_owners` table. /// /// (Automatically generated by Diesel.) @@ -294,9 +296,9 @@ table! { } } -table! { +diesel::table! { use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; + use super::sql_types::Tsvector; /// Representation of the `crates` table. /// @@ -377,10 +379,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `crates_categories` table. /// /// (Automatically generated by Diesel.) @@ -400,10 +399,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `crates_keywords` table. /// /// (Automatically generated by Diesel.) @@ -423,10 +419,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `dependencies` table. /// /// (Automatically generated by Diesel.) @@ -469,7 +462,7 @@ table! { default_features -> Bool, /// The `features` column of the `dependencies` table. /// - /// Its SQL type is `Array<Text>`. + /// Its SQL type is `Array<Nullable<Text>>`. /// /// (Automatically generated by Diesel.) features -> Array<Text>, @@ -494,10 +487,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `emails` table. /// /// (Automatically generated by Diesel.) @@ -541,10 +531,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `follows` table. /// /// (Automatically generated by Diesel.) @@ -564,10 +551,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `keywords` table. /// /// (Automatically generated by Diesel.) @@ -599,10 +583,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `metadata` table. /// /// (Automatically generated by Diesel.) @@ -616,10 +597,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `publish_limit_buckets` table. /// /// (Automatically generated by Diesel.) @@ -645,10 +623,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `publish_rate_overrides` table. /// /// (Automatically generated by Diesel.) @@ -674,10 +649,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `readme_renderings` table. /// /// (Automatically generated by Diesel.) @@ -697,7 +669,7 @@ table! { } } -table! { +diesel::table! { /// Representation of the `recent_crate_downloads` view. /// /// This data represents the downloads in the last 90 days. @@ -715,10 +687,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `reserved_crate_names` table. /// /// (Automatically generated by Diesel.) @@ -732,10 +701,7 @@ table! { } } -table!
{ - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `teams` table. /// /// (Automatically generated by Diesel.) @@ -779,10 +745,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `users` table. /// /// (Automatically generated by Diesel.) @@ -838,10 +801,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `version_downloads` table. /// /// (Automatically generated by Diesel.) @@ -879,10 +839,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `version_owner_actions` table. /// /// (Automatically generated by Diesel.) @@ -926,10 +883,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `versions` table. /// /// (Automatically generated by Diesel.) @@ -1015,10 +969,7 @@ table! { } } -table! { - use diesel::sql_types::*; - use diesel_full_text_search::{TsVector as Tsvector}; - +diesel::table! { /// Representation of the `versions_published_by` table. /// /// (Automatically generated by Diesel.) @@ -1038,34 +989,34 @@ table! { } } -joinable!(api_tokens -> users (user_id)); -joinable!(badges -> crates (crate_id)); -joinable!(crate_owner_invitations -> crates (crate_id)); -joinable!(crate_owners -> crates (crate_id)); -joinable!(crate_owners -> teams (owner_id)); -joinable!(crate_owners -> users (owner_id)); -joinable!(crates_categories -> categories (category_id)); -joinable!(crates_categories -> crates (crate_id)); -joinable!(crates_keywords -> crates (crate_id)); -joinable!(crates_keywords -> keywords (keyword_id)); -joinable!(dependencies -> crates (crate_id)); -joinable!(dependencies -> versions (version_id)); -joinable!(emails -> users (user_id)); -joinable!(follows -> crates (crate_id)); -joinable!(follows -> users (user_id)); -joinable!(publish_limit_buckets -> users (user_id)); -joinable!(publish_rate_overrides -> users (user_id)); -joinable!(readme_renderings -> versions (version_id)); -joinable!(recent_crate_downloads -> crates (crate_id)); -joinable!(version_downloads -> versions (version_id)); -joinable!(version_owner_actions -> api_tokens (api_token_id)); -joinable!(version_owner_actions -> users (user_id)); -joinable!(version_owner_actions -> versions (version_id)); -joinable!(versions -> crates (crate_id)); -joinable!(versions -> users (published_by)); -joinable!(versions_published_by -> versions (version_id)); +diesel::joinable!(api_tokens -> users (user_id)); +diesel::joinable!(badges -> crates (crate_id)); +diesel::joinable!(crate_owner_invitations -> crates (crate_id)); +diesel::joinable!(crate_owners -> crates (crate_id)); +diesel::joinable!(crate_owners -> teams (owner_id)); +diesel::joinable!(crate_owners -> users (owner_id)); +diesel::joinable!(crates_categories -> categories (category_id)); +diesel::joinable!(crates_categories -> crates (crate_id)); +diesel::joinable!(crates_keywords -> crates (crate_id)); +diesel::joinable!(crates_keywords -> keywords (keyword_id)); +diesel::joinable!(dependencies -> crates (crate_id)); +diesel::joinable!(dependencies -> versions (version_id)); +diesel::joinable!(emails -> users (user_id)); +diesel::joinable!(follows -> crates (crate_id)); 
+diesel::joinable!(follows -> users (user_id)); +diesel::joinable!(publish_limit_buckets -> users (user_id)); +diesel::joinable!(publish_rate_overrides -> users (user_id)); +diesel::joinable!(readme_renderings -> versions (version_id)); +diesel::joinable!(recent_crate_downloads -> crates (crate_id)); +diesel::joinable!(version_downloads -> versions (version_id)); +diesel::joinable!(version_owner_actions -> api_tokens (api_token_id)); +diesel::joinable!(version_owner_actions -> users (user_id)); +diesel::joinable!(version_owner_actions -> versions (version_id)); +diesel::joinable!(versions -> crates (crate_id)); +diesel::joinable!(versions -> users (published_by)); +diesel::joinable!(versions_published_by -> versions (version_id)); -allow_tables_to_appear_in_same_query!( +diesel::allow_tables_to_appear_in_same_query!( api_tokens, background_jobs, badges, diff --git a/src/sql.rs b/src/sql.rs index c1ec9a61f80..751c4936d2e 100644 --- a/src/sql.rs +++ b/src/sql.rs @@ -1,6 +1,6 @@ -use diesel::sql_types::{Array, Date, Double, Interval, Text, Timestamp}; +use diesel::sql_types::{Date, Double, Interval, SingleValue, Text, Timestamp}; -sql_function!(#[aggregate] fn array_agg<T>(x: T) -> Array<T>); +sql_function!(#[aggregate] fn array_agg<T: SingleValue>(x: T) -> Array<T>); sql_function!(fn canon_crate_name(x: Text) -> Text); sql_function!(fn to_char(a: Date, b: Text) -> Text); sql_function!(fn lower(x: Text) -> Text); @@ -10,5 +10,5 @@ sql_function! { fn interval_part(x: Text, y: Interval) -> Double; } sql_function!(fn floor(x: Double) -> Integer); -sql_function!(fn greatest<T>(x: T, y: T) -> T); -sql_function!(fn least<T>(x: T, y: T) -> T); +sql_function!(fn greatest<T: SingleValue>(x: T, y: T) -> T); +sql_function!(fn least<T: SingleValue>(x: T, y: T) -> T); diff --git a/src/swirl/runner.rs b/src/swirl/runner.rs index a027013f609..d5d258e4d08 100644 --- a/src/swirl/runner.rs +++ b/src/swirl/runner.rs @@ -127,7 +127,7 @@ impl Runner { // The connection may not be `Send` so we need to clone the pool instead let pool = self.connection_pool.clone(); self.thread_pool.execute(move || { - let conn = &*match pool.get() { + let conn = &mut *match pool.get() { Ok(conn) => conn, Err(e) => { // TODO: Review error handling and possibly drop all usage of `let _ =` in this file @@ -136,7 +136,7 @@ impl Runner { } }; - let job_run_result = conn.transaction::<_, diesel::result::Error, _>(|| { + let job_run_result = conn.transaction::<_, diesel::result::Error, _>(|conn| { let job = match storage::find_next_unlocked_job(conn).optional() { Ok(Some(j)) => { let _ = sender.send(Event::Working); @@ -153,20 +153,15 @@ impl Runner { }; let job_id = job.id; - let transaction_manager = conn.transaction_manager(); - let initial_depth = <AnsiTransactionManager as TransactionManager<PgConnection>>::get_transaction_depth( - transaction_manager - ); + let initial_depth = get_transaction_depth(conn)?; if initial_depth != 1 { warn!("Initial transaction depth is not 1. This is very unexpected"); } let result = conn - .transaction(|| { + .transaction(|conn| { let pool = pool.to_real_pool(); - let state = AssertUnwindSafe(PerformState {conn, pool}); + let state = AssertUnwindSafe(PerformState { conn, pool }); catch_unwind(|| { // Ensure the whole `AssertUnwindSafe(_)` is moved let state = state; @@ -177,28 +172,17 @@ impl Runner { // TODO: Replace with flatten() once that stabilizes .and_then(std::convert::identity); + // If the job panics it could leave the connection inside an inner transaction(s).
+ // Attempt to roll those back so we can mark the job as failed, but if the rollback + // fails then there isn't much we can do at this point so return early. `r2d2` will + // detect the bad state and drop it from the pool. loop { - let depth = <AnsiTransactionManager as TransactionManager<PgConnection>>::get_transaction_depth( - transaction_manager - ); + let depth = get_transaction_depth(conn)?; if depth == initial_depth { break; } warn!("Rolling back a transaction due to a panic in a background task"); - match transaction_manager - .rollback_transaction(conn) - { - Ok(_) => (), - Err(e) => { - error!("Leaking a thread and database connection because of an error while rolling back transaction: {e}"); - loop { - std::thread::sleep(Duration::from_secs(24 * 60 * 60)); - error!("How am I still alive?"); - } - } - } + AnsiTransactionManager::rollback_transaction(conn)?; } match result { @@ -236,7 +220,7 @@ impl Runner { // FIXME: Only public for `src/tests/util/test_app.rs` pub fn check_for_failed_jobs(&self) -> Result<(), FailedJobsError> { self.wait_for_jobs()?; - let failed_jobs = storage::failed_job_count(&*self.connection()?)?; + let failed_jobs = storage::failed_job_count(&mut *self.connection()?)?; if failed_jobs == 0 { Ok(()) } else { @@ -255,6 +239,14 @@ impl Runner { } } +fn get_transaction_depth(conn: &mut PgConnection) -> QueryResult<u32> { + let transaction_manager = AnsiTransactionManager::transaction_manager_status_mut(conn); + Ok(transaction_manager + .transaction_depth()? + .map(u32::from) + .unwrap_or(0)) +} + /// Try to figure out what's in the box, and print it if we can. /// /// The actual error type we will get from `panic::catch_unwind` is really poorly documented. @@ -329,7 +321,7 @@ mod tests { let remaining_jobs = background_jobs .count() - .get_result(&*runner.connection().unwrap()); + .get_result(&mut *runner.connection().unwrap()); assert_eq!(Ok(0), remaining_jobs); } @@ -343,14 +335,14 @@ mod tests { let barrier2 = barrier.clone(); runner.get_single_job(dummy_sender(), move |_, state| { - state.conn.transaction(|| { + state.conn.transaction(|_| { barrier.0.wait(); // The job should go back into the queue after a panic panic!(); }) }); - let conn = &*runner.connection().unwrap(); + let conn = &mut *runner.connection().unwrap(); // Wait for the first thread to acquire the lock barrier2.0.wait(); // We are intentionally not using `get_single_job` here.
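
The panic-recovery logic changed in the runner hunks above can be easier to review in isolation. Below is a condensed, self-contained sketch of the same pattern against the Diesel 2 API; `run_job` is a hypothetical stand-in for the real job dispatch, and error handling is reduced to `?`:

    use diesel::connection::{AnsiTransactionManager, TransactionManager};
    use diesel::pg::PgConnection;
    use diesel::prelude::*;
    use std::panic::{catch_unwind, AssertUnwindSafe};

    fn run_job(_conn: &mut PgConnection) -> QueryResult<()> {
        Ok(()) // hypothetical job body; the real one may panic mid-transaction
    }

    // Mirrors the `get_transaction_depth` helper added in this diff.
    fn transaction_depth(conn: &mut PgConnection) -> QueryResult<u32> {
        let status = AnsiTransactionManager::transaction_manager_status_mut(conn);
        Ok(status.transaction_depth()?.map(u32::from).unwrap_or(0))
    }

    fn run_job_with_recovery(conn: &mut PgConnection) -> QueryResult<()> {
        let initial_depth = transaction_depth(conn)?;
        // A panic inside the closure unwinds past the COMMIT/ROLLBACK.
        let _ = catch_unwind(AssertUnwindSafe(|| {
            conn.transaction(|conn| run_job(conn))
        }));
        // Roll back whatever the panic left open so the connection stays usable.
        while transaction_depth(conn)? > initial_depth {
            AnsiTransactionManager::rollback_transaction(conn)?;
        }
        Ok(())
    }
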
@@ -390,7 +382,7 @@ mod tests { .find(job_id) .select(retries) .for_update() - .first::<i32>(&*runner.connection().unwrap()) + .first::<i32>(&mut *runner.connection().unwrap()) .unwrap(); assert_eq!(1, tries); } @@ -413,8 +405,8 @@ mod tests { impl<'a> Drop for TestGuard<'a> { fn drop(&mut self) { - ::diesel::sql_query("TRUNCATE TABLE background_jobs") - .execute(&*runner().connection().unwrap()) + diesel::sql_query("TRUNCATE TABLE background_jobs") + .execute(&mut *runner().connection().unwrap()) .unwrap(); } } @@ -427,10 +419,10 @@ mod tests { } fn create_dummy_job(runner: &Runner) -> storage::BackgroundJob { - ::diesel::insert_into(background_jobs) + diesel::insert_into(background_jobs) .values((job_type.eq("Foo"), data.eq(serde_json::json!(null)))) .returning((id, job_type, data)) - .get_result(&*runner.connection().unwrap()) + .get_result(&mut *runner.connection().unwrap()) .unwrap() } } diff --git a/src/swirl/storage.rs b/src/swirl/storage.rs index 37683ff5393..e7beb7f6447 100644 --- a/src/swirl/storage.rs +++ b/src/swirl/storage.rs @@ -24,7 +24,7 @@ fn retriable() -> Box -pub(super) fn find_next_unlocked_job(conn: &PgConnection) -> QueryResult<BackgroundJob> { +pub(super) fn find_next_unlocked_job(conn: &mut PgConnection) -> QueryResult<BackgroundJob> { use schema::background_jobs::dsl::*; background_jobs @@ -37,7 +37,7 @@ pub(super) fn find_next_unlocked_job(conn: &PgConnection) -> QueryResult<BackgroundJob> { -pub(super) fn failed_job_count(conn: &PgConnection) -> QueryResult<i64> { +pub(super) fn failed_job_count(conn: &mut PgConnection) -> QueryResult<i64> { use schema::background_jobs::dsl::*; background_jobs @@ -47,7 +47,7 @@ pub(super) fn failed_job_count(conn: &PgConnection) -> QueryResult<i64> { } /// Deletes a job that has successfully completed running -pub(super) fn delete_successful_job(conn: &PgConnection, job_id: i64) -> QueryResult<()> { +pub(super) fn delete_successful_job(conn: &mut PgConnection, job_id: i64) -> QueryResult<()> { use schema::background_jobs::dsl::*; delete(background_jobs.find(job_id)).execute(conn)?; @@ -58,7 +58,7 @@ pub(super) fn delete_successful_job(conn: &PgConnection, job_id: i64) -> QueryRe /// /// Ignores any database errors that may have occurred. If the DB has gone away, /// we assume that just trying again with a new connection will succeed.
-pub(super) fn update_failed_job(conn: &PgConnection, job_id: i64) { +pub(super) fn update_failed_job(conn: &mut PgConnection, job_id: i64) { use schema::background_jobs::dsl::*; let _ = update(background_jobs.find(job_id)) diff --git a/src/test_util.rs b/src/test_util.rs index 0cae4fe771e..0d1bbf214bd 100644 --- a/src/test_util.rs +++ b/src/test_util.rs @@ -9,7 +9,7 @@ pub fn pg_connection_no_transaction() -> PgConnection { } pub fn pg_connection() -> PgConnection { - let conn = pg_connection_no_transaction(); + let mut conn = pg_connection_no_transaction(); conn.begin_test_transaction().unwrap(); conn } diff --git a/src/tests/all.rs b/src/tests/all.rs index d1f42d5d88e..a1f905edefb 100644 --- a/src/tests/all.rs +++ b/src/tests/all.rs @@ -3,8 +3,6 @@ #[macro_use] extern crate claims; #[macro_use] -extern crate diesel; -#[macro_use] extern crate serde; #[macro_use] extern crate serde_json; @@ -132,7 +130,12 @@ fn new_team(login: &str) -> NewTeam<'_> { } } -fn add_team_to_crate(t: &Team, krate: &Crate, u: &User, conn: &PgConnection) -> QueryResult<()> { +fn add_team_to_crate( t: &Team, krate: &Crate, u: &User, conn: &mut PgConnection, ) -> QueryResult<()> { let crate_owner = CrateOwner { crate_id: krate.id, owner_id: t.id, diff --git a/src/tests/builders/krate.rs b/src/tests/builders/krate.rs index 37bbb99598b..b0d688c8f5e 100644 --- a/src/tests/builders/krate.rs +++ b/src/tests/builders/krate.rs @@ -111,7 +111,7 @@ impl<'a> CrateBuilder<'a> { self } - pub fn build(mut self, connection: &PgConnection) -> AppResult<Crate> { + pub fn build(mut self, connection: &mut PgConnection) -> AppResult<Crate> { use diesel::{insert_into, select, update}; let mut krate = self @@ -147,8 +147,8 @@ impl<'a> CrateBuilder<'a> { )) .execute(connection)?; - no_arg_sql_function!(refresh_recent_crate_downloads, ()); - select(refresh_recent_crate_downloads).execute(connection)?; + sql_function!(fn refresh_recent_crate_downloads()); + select(refresh_recent_crate_downloads()).execute(connection)?; } if !self.categories.is_empty() { @@ -175,7 +175,7 @@ impl<'a> CrateBuilder<'a> { /// /// Panics (and fails the test) if any part of inserting the crate record fails.
#[track_caller] - pub fn expect_build(self, connection: &PgConnection) -> Crate { + pub fn expect_build(self, connection: &mut PgConnection) -> Crate { let name = self.krate.name; self.build(connection).unwrap_or_else(|e| { panic!("Unable to create crate {name}: {e:?}"); diff --git a/src/tests/builders/version.rs b/src/tests/builders/version.rs index 13d64f356de..2187ada303b 100644 --- a/src/tests/builders/version.rs +++ b/src/tests/builders/version.rs @@ -87,7 +87,7 @@ impl<'a> VersionBuilder<'a> { self, crate_id: i32, published_by: i32, - connection: &PgConnection, + connection: &mut PgConnection, ) -> AppResult<Version> { use diesel::{insert_into, update}; @@ -150,7 +150,7 @@ impl<'a> VersionBuilder<'a> { self, crate_id: i32, published_by: i32, - connection: &PgConnection, + connection: &mut PgConnection, ) -> Version { self.build(crate_id, published_by, connection) .unwrap_or_else(|e| { diff --git a/src/tests/categories.rs b/src/tests/categories.rs index 52d9aba6e7e..1f65df66fec 100644 --- a/src/tests/categories.rs +++ b/src/tests/categories.rs @@ -40,12 +40,12 @@ description = "Another category ho hum" fn pg_connection() -> PgConnection { let database_url = dotenv::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests"); - let conn = PgConnection::establish(&database_url).unwrap(); + let mut conn = PgConnection::establish(&database_url).unwrap(); conn.begin_test_transaction().unwrap(); conn } -fn select_slugs(conn: &PgConnection) -> Vec<String> { +fn select_slugs(conn: &mut PgConnection) -> Vec<String> { categories::table .select(categories::slug) .order(categories::slug) @@ -55,33 +55,32 @@ fn select_slugs(conn: &PgConnection) -> Vec<String> { #[test] fn sync_adds_new_categories() { - let conn = pg_connection(); + let conn = &mut pg_connection(); - ::cargo_registry::boot::categories::sync_with_connection(ALGORITHMS_AND_SUCH, &conn).unwrap(); + ::cargo_registry::boot::categories::sync_with_connection(ALGORITHMS_AND_SUCH, conn).unwrap(); - let categories = select_slugs(&conn); + let categories = select_slugs(conn); assert_eq!(categories, vec!["algorithms", "algorithms::such"]); } #[test] fn sync_removes_missing_categories() { - let conn = pg_connection(); + let conn = &mut pg_connection(); - ::cargo_registry::boot::categories::sync_with_connection(ALGORITHMS_AND_SUCH, &conn).unwrap(); - ::cargo_registry::boot::categories::sync_with_connection(ALGORITHMS, &conn).unwrap(); + ::cargo_registry::boot::categories::sync_with_connection(ALGORITHMS_AND_SUCH, conn).unwrap(); + ::cargo_registry::boot::categories::sync_with_connection(ALGORITHMS, conn).unwrap(); - let categories = select_slugs(&conn); + let categories = select_slugs(conn); assert_eq!(categories, vec!["algorithms"]); } #[test] fn sync_adds_and_removes() { - let conn = pg_connection(); + let conn = &mut pg_connection(); - ::cargo_registry::boot::categories::sync_with_connection(ALGORITHMS_AND_SUCH, &conn).unwrap(); - ::cargo_registry::boot::categories::sync_with_connection(ALGORITHMS_AND_ANOTHER, &conn) - .unwrap(); + ::cargo_registry::boot::categories::sync_with_connection(ALGORITHMS_AND_SUCH, conn).unwrap(); + ::cargo_registry::boot::categories::sync_with_connection(ALGORITHMS_AND_ANOTHER, conn).unwrap(); - let categories = select_slugs(&conn); + let categories = select_slugs(conn); assert_eq!(categories, vec!["algorithms", "another"]); } diff --git a/src/tests/read_only_mode.rs b/src/tests/read_only_mode.rs index 72c880d586d..f64eaf25d3d 100644 --- a/src/tests/read_only_mode.rs +++ b/src/tests/read_only_mode.rs @@ -58,7 +58,7 @@ fn
can_download_crate_in_read_only_mode() { }) } -fn set_read_only(conn: &PgConnection) -> QueryResult<()> { +fn set_read_only(conn: &mut PgConnection) -> QueryResult<()> { diesel::sql_query("SET TRANSACTION READ ONLY").execute(conn)?; diesel::sql_query("SAVEPOINT test_post_readonly").execute(conn)?; Ok(()) diff --git a/src/tests/schema_details.rs b/src/tests/schema_details.rs index a3d180638df..1765d8fbcf1 100644 --- a/src/tests/schema_details.rs +++ b/src/tests/schema_details.rs @@ -45,17 +45,17 @@ fn all_columns_called_version_id_have_a_cascading_foreign_key() { #[derive(QueryableByName)] struct FkConstraint { - #[sql_type = "Text"] - #[column_name = "conname"] + #[diesel(sql_type = Text)] + #[diesel(column_name = conname)] name: String, - #[sql_type = "Text"] + #[diesel(sql_type = Text)] definition: String, } #[derive(QueryableByName)] struct TableNameAndConstraint { - #[sql_type = "Text"] - #[column_name = "relname"] + #[diesel(sql_type = Text)] + #[diesel(column_name = relname)] table_name: String, #[diesel(embed)] constraint: Option<FkConstraint>, diff --git a/src/tests/server_binary.rs b/src/tests/server_binary.rs index b143555f440..fe42112f73a 100644 --- a/src/tests/server_binary.rs +++ b/src/tests/server_binary.rs @@ -16,7 +16,7 @@ const SERVER_BOOT_TIMEOUT_SECONDS: u64 = 30; #[test] fn normal_startup() { let server_bin = ServerBin::prepare().unwrap(); - initialize_dummy_crate(&server_bin.db().unwrap()); + initialize_dummy_crate(&mut server_bin.db().unwrap()); let running_server = server_bin.start().unwrap(); @@ -37,7 +37,7 @@ fn normal_startup() { #[test] fn startup_without_database() { let server_bin = ServerBin::prepare().unwrap(); - initialize_dummy_crate(&server_bin.db().unwrap()); + initialize_dummy_crate(&mut server_bin.db().unwrap()); // Break the networking *before* starting the binary, to ensure the binary can fully startup // without a database connection.
Most of crates.io should not work when started without a @@ -60,7 +60,7 @@ fn startup_without_database() { .ends_with("/crates/FOO/FOO-1.0.0.crate")); } -fn initialize_dummy_crate(conn: &PgConnection) { +fn initialize_dummy_crate(conn: &mut PgConnection) { use cargo_registry::schema::users; let user: User = diesel::insert_into(users::table) diff --git a/src/tests/util/fresh_schema.rs b/src/tests/util/fresh_schema.rs index 553ae1bac6b..08ba69a036d 100644 --- a/src/tests/util/fresh_schema.rs +++ b/src/tests/util/fresh_schema.rs @@ -1,6 +1,6 @@ use diesel::connection::SimpleConnection; use diesel::prelude::*; -use diesel_migrations::{find_migrations_directory, run_pending_migrations_in_directory}; +use diesel_migrations::{FileBasedMigrations, MigrationHarness}; use rand::Rng; pub(crate) struct FreshSchema { @@ -13,7 +13,7 @@ impl FreshSchema { pub(crate) fn new(database_url: &str) -> Self { let schema_name = generate_schema_name(); - let conn = PgConnection::establish(database_url).expect("can't connect to the test db"); + let mut conn = PgConnection::establish(database_url).expect("can't connect to the test db"); conn.batch_execute(&format!( " DROP SCHEMA IF EXISTS {schema_name} CASCADE; @@ -23,8 +23,9 @@ impl FreshSchema { )) .expect("failed to initialize schema"); - let migrations_dir = find_migrations_directory().unwrap(); - run_pending_migrations_in_directory(&conn, &migrations_dir, &mut std::io::sink()) + let migrations = + FileBasedMigrations::find_migrations_directory().expect("Could not find migrations"); + conn.run_pending_migrations(migrations) .expect("failed to run migrations on the test schema"); let database_url = url::Url::parse_with_params( diff --git a/src/tests/util/test_app.rs b/src/tests/util/test_app.rs index a630736071b..4808c0940db 100644 --- a/src/tests/util/test_app.rs +++ b/src/tests/util/test_app.rs @@ -48,8 +48,8 @@ impl Drop for TestAppInner { // Manually verify that all jobs have completed successfully // This will catch any tests that enqueued a job but forgot to initialize the runner - let conn = self.app.primary_database.get().unwrap(); - let job_count: i64 = background_jobs.count().get_result(&*conn).unwrap(); + let conn = &mut *self.app.primary_database.get().unwrap(); + let job_count: i64 = background_jobs.count().get_result(conn).unwrap(); assert_eq!( 0, job_count, "Unprocessed or failed jobs remain in the queue" @@ -93,9 +93,9 @@ impl TestApp { /// Within each test, the connection pool only has 1 connection so it is necessary to drop the /// connection before making any API calls. Once the closure returns, the connection is /// dropped, ensuring it is returned to the pool and available for any future API calls. 
- pub fn db<T, F: FnOnce(&PgConnection) -> T>(&self, f: F) -> T { - let conn = self.0.app.primary_database.get().unwrap(); - f(&conn) + pub fn db<T, F: FnOnce(&mut PgConnection) -> T>(&self, f: F) -> T { + let conn = &mut self.0.app.primary_database.get().unwrap(); + f(conn) } /// Create a new user with a verified email address in the database and return a mock user diff --git a/src/util/token.rs b/src/util/token.rs index c1de7deb94d..01ff1f133b5 100644 --- a/src/util/token.rs +++ b/src/util/token.rs @@ -1,12 +1,11 @@ -use diesel::{backend::Backend, deserialize::FromSql, pg::Pg, serialize::ToSql, sql_types::Bytea}; +use diesel::{deserialize::FromSql, pg::Pg, serialize::ToSql, sql_types::Bytea}; use rand::{distributions::Uniform, rngs::OsRng, Rng}; use sha2::{Digest, Sha256}; -use std::io::Write; const TOKEN_LENGTH: usize = 32; #[derive(FromSqlRow, AsExpression, Clone, PartialEq, Eq)] -#[sql_type = "Bytea"] +#[diesel(sql_type = Bytea)] pub struct SecureToken { sha256: Vec<u8>, } @@ -48,16 +47,13 @@ impl std::fmt::Debug for SecureToken { } impl ToSql<Bytea, Pg> for SecureToken { - fn to_sql<W: Write>( - &self, - out: &mut diesel::serialize::Output<'_, W, Pg>, - ) -> diesel::serialize::Result { - ToSql::<Bytea, Pg>::to_sql(&self.sha256, out) + fn to_sql(&self, out: &mut diesel::serialize::Output<'_, '_, Pg>) -> diesel::serialize::Result { + ToSql::<Bytea, Pg>::to_sql(&self.sha256, &mut out.reborrow()) } } impl FromSql<Bytea, Pg> for SecureToken { - fn from_sql(bytes: Option<&<Pg as Backend>::RawValue>) -> diesel::deserialize::Result<Self> { + fn from_sql(bytes: diesel::pg::PgValue<'_>) -> diesel::deserialize::Result<Self> { Ok(Self { sha256: FromSql::<Bytea, Pg>::from_sql(bytes)?, }) diff --git a/src/views/krate_publish.rs b/src/views/krate_publish.rs index 899765c06a7..44658286f20 100644 --- a/src/views/krate_publish.rs +++ b/src/views/krate_publish.rs @@ -227,11 +227,10 @@ impl Serialize for EncodableCrateVersionReq { use diesel::pg::Pg; use diesel::serialize::{self, Output, ToSql}; use diesel::sql_types::Text; -use std::io::Write; impl ToSql<Text, Pg> for EncodableFeature { - fn to_sql<W: Write>(&self, out: &mut Output<'_, W, Pg>) -> serialize::Result { - ToSql::<Text, Pg>::to_sql(&**self, out) + fn to_sql(&self, out: &mut Output<'_, '_, Pg>) -> serialize::Result { + ToSql::<Text, Pg>::to_sql(&**self, &mut out.reborrow()) } } diff --git a/src/worker/daily_db_maintenance.rs b/src/worker/daily_db_maintenance.rs index af10812cd1a..29b6a9dde8f 100644 --- a/src/worker/daily_db_maintenance.rs +++ b/src/worker/daily_db_maintenance.rs @@ -11,7 +11,7 @@ use crate::swirl::PerformError; /// auto-vacuum again. use diesel::{sql_query, PgConnection, RunQueryDsl}; -pub(crate) fn perform_daily_db_maintenance(conn: &PgConnection) -> Result<(), PerformError> { +pub(crate) fn perform_daily_db_maintenance(conn: &mut PgConnection) -> Result<(), PerformError> { info!("Running VACUUM on version_downloads table"); sql_query("VACUUM version_downloads;").execute(conn)?; info!("Finished running VACUUM on version_downloads table"); diff --git a/src/worker/dump_db/gen_scripts.rs b/src/worker/dump_db/gen_scripts.rs index 175ed5ec9af..86c14e6111b 100644 --- a/src/worker/dump_db/gen_scripts.rs +++ b/src/worker/dump_db/gen_scripts.rs @@ -122,8 +122,8 @@ mod tests { /// test database.
#[test] fn check_visibility_config() { - let conn = pg_connection(); - let db_columns = HashSet::<Column>::from_iter(get_db_columns(&conn)); + let conn = &mut pg_connection(); + let db_columns = HashSet::<Column>::from_iter(get_db_columns(conn)); let vis_columns = VisibilityConfig::get() .0 .iter() @@ -177,7 +177,7 @@ mod tests { column_name: String, } - fn get_db_columns(conn: &PgConnection) -> Vec<Column> { + fn get_db_columns(conn: &mut PgConnection) -> Vec<Column> { use information_schema::columns::dsl::*; columns .select((table_name, column_name)) diff --git a/src/worker/git.rs b/src/worker/git.rs index 32b04c5a350..f9b80cbd764 100644 --- a/src/worker/git.rs +++ b/src/worker/git.rs @@ -14,7 +14,7 @@ use std::process::Command; #[instrument(skip_all, fields(krate.name = ?krate.name, krate.vers = ?krate.vers))] pub fn perform_index_add_crate( env: &Environment, - conn: &PgConnection, + conn: &mut PgConnection, krate: &Crate, ) -> Result<(), PerformError> { info!("Syncing git index to HTTP-based index"); @@ -81,7 +81,7 @@ pub fn update_crate_index(crate_name: String) -> Job { #[instrument(skip(env, conn))] pub fn perform_index_update_yanked( env: &Environment, - conn: &PgConnection, + conn: &mut PgConnection, krate: &str, version_num: &str, ) -> Result<(), PerformError> { diff --git a/src/worker/readmes.rs b/src/worker/readmes.rs index 16dd2ee6db3..6e7ba2e1652 100644 --- a/src/worker/readmes.rs +++ b/src/worker/readmes.rs @@ -8,7 +8,7 @@ use crate::background_jobs::{Environment, Job, RenderAndUploadReadmeJob}; use crate::models::Version; pub fn perform_render_and_upload_readme( - conn: &PgConnection, + conn: &mut PgConnection, env: &Environment, version_id: i32, text: &str, @@ -21,7 +21,7 @@ pub fn perform_render_and_upload_readme( let rendered = text_to_html(text, readme_path, base_url, pkg_path_in_vcs); - conn.transaction(|| { + conn.transaction(|conn| { Version::record_readme_rendering(version_id, conn)?; let (crate_name, vers): (String, String) = versions::table .find(version_id) diff --git a/src/worker/update_downloads.rs b/src/worker/update_downloads.rs index 3a0466f7a05..20d2fe41f83 100644 --- a/src/worker/update_downloads.rs +++ b/src/worker/update_downloads.rs @@ -7,7 +7,7 @@ use crate::background_jobs::Job; use crate::swirl::PerformError; use diesel::prelude::*; -pub fn perform_update_downloads(conn: &PgConnection) -> Result<(), PerformError> { +pub fn perform_update_downloads(conn: &mut PgConnection) -> Result<(), PerformError> { update(conn)?; Ok(()) } @@ -16,7 +16,7 @@ pub fn update_downloads() -> Job { Job::UpdateDownloads } -fn update(conn: &PgConnection) -> QueryResult<()> { +fn update(conn: &mut PgConnection) -> QueryResult<()> { use self::version_downloads::dsl::*; use diesel::dsl::now; use diesel::select; @@ -40,20 +40,20 @@ fn update(conn: &PgConnection) -> QueryResult<()> { .execute(conn)?; info!("Finished freezing old version_downloads"); - no_arg_sql_function!(refresh_recent_crate_downloads, ()); - select(refresh_recent_crate_downloads).execute(conn)?; - info!("Finished running refresh_recent_crate_downloads"); + sql_function!(fn refresh_recent_crate_downloads()); + select(refresh_recent_crate_downloads()).execute(conn)?; + info!("Finished running refresh_recent_crate_downloads"); Ok(()) } -fn collect(conn: &PgConnection, rows: &[VersionDownload]) -> QueryResult<()> { +fn collect(conn: &mut PgConnection, rows: &[VersionDownload]) -> QueryResult<()> { use diesel::update; for download in rows { let amt = download.downloads - download.counted; - conn.transaction::<_, diesel::result::Error, _>(|| {
+ conn.transaction::<_, diesel::result::Error, _>(|conn| { // Update the total number of version downloads let crate_id: i32 = update(versions::table.find(download.version_id)) .set(versions::downloads.eq(versions::downloads + amt)) @@ -91,13 +91,13 @@ mod test { use crate::models::{Crate, NewCrate, NewUser, NewVersion, User, Version}; use std::collections::BTreeMap; - fn user(conn: &PgConnection) -> User { + fn user(conn: &mut PgConnection) -> User { NewUser::new(2, "login", None, None, "access_token") .create_or_update(None, &Emails::new_in_memory(), conn) .unwrap() } - fn crate_and_version(conn: &PgConnection, user_id: i32) -> (Crate, Version) { + fn crate_and_version(conn: &mut PgConnection, user_id: i32) -> (Crate, Version) { let krate = NewCrate { name: "foo", ..Default::default() @@ -124,12 +124,12 @@ mod test { fn increment() { use diesel::dsl::*; - let conn = crate::db::test_conn(); - let user = user(&conn); - let (krate, version) = crate_and_version(&conn, user.id); + let conn = &mut crate::db::test_conn(); + let user = user(conn); + let (krate, version) = crate_and_version(conn, user.id); insert_into(version_downloads::table) .values(version_downloads::version_id.eq(version.id)) - .execute(&conn) + .execute(conn) .unwrap(); insert_into(version_downloads::table) .values(( @@ -137,25 +137,25 @@ mod test { version_downloads::date.eq(date(now - 1.day())), version_downloads::processed.eq(true), )) - .execute(&conn) + .execute(conn) .unwrap(); - super::update(&conn).unwrap(); + super::update(conn).unwrap(); let version_downloads = versions::table .find(version.id) .select(versions::downloads) - .first(&conn); + .first(conn); assert_eq!(Ok(1), version_downloads); let crate_downloads = crates::table .find(krate.id) .select(crates::downloads) - .first(&conn); + .first(conn); assert_eq!(Ok(1), crate_downloads); - super::update(&conn).unwrap(); + super::update(conn).unwrap(); let version_downloads = versions::table .find(version.id) .select(versions::downloads) - .first(&conn); + .first(conn); assert_eq!(Ok(1), version_downloads); } @@ -163,9 +163,9 @@ mod test { fn set_processed_true() { use diesel::dsl::*; - let conn = crate::db::test_conn(); - let user = user(&conn); - let (_, version) = crate_and_version(&conn, user.id); + let conn = &mut crate::db::test_conn(); + let user = user(conn); + let (_, version) = crate_and_version(conn, user.id); insert_into(version_downloads::table) .values(( version_downloads::version_id.eq(version.id), @@ -174,22 +174,22 @@ mod test { version_downloads::date.eq(date(now - 2.days())), version_downloads::processed.eq(false), )) - .execute(&conn) + .execute(conn) .unwrap(); - super::update(&conn).unwrap(); + super::update(conn).unwrap(); let processed = version_downloads::table .filter(version_downloads::version_id.eq(version.id)) .select(version_downloads::processed) - .first(&conn); + .first(conn); assert_eq!(Ok(true), processed); } #[test] fn dont_process_recent_row() { use diesel::dsl::*; - let conn = crate::db::test_conn(); - let user = user(&conn); - let (_, version) = crate_and_version(&conn, user.id); + let conn = &mut crate::db::test_conn(); + let user = user(conn); + let (_, version) = crate_and_version(conn, user.id); insert_into(version_downloads::table) .values(( version_downloads::version_id.eq(version.id), @@ -198,13 +198,13 @@ mod test { version_downloads::date.eq(date(now)), version_downloads::processed.eq(false), )) - .execute(&conn) + .execute(conn) .unwrap(); - super::update(&conn).unwrap(); + super::update(conn).unwrap(); let processed = 
version_downloads::table .filter(version_downloads::version_id.eq(version.id)) .select(version_downloads::processed) - .first(&conn); + .first(conn); assert_eq!(Ok(false), processed); } @@ -213,16 +213,16 @@ mod test { use diesel::dsl::*; use diesel::update; - let conn = crate::db::test_conn(); - let user = user(&conn); - let (krate, version) = crate_and_version(&conn, user.id); + let conn = &mut crate::db::test_conn(); + let user = user(conn); + let (krate, version) = crate_and_version(conn, user.id); update(versions::table) .set(versions::updated_at.eq(now - 2.hours())) - .execute(&conn) + .execute(conn) .unwrap(); update(crates::table) .set(crates::updated_at.eq(now - 2.hours())) - .execute(&conn) + .execute(conn) .unwrap(); insert_into(version_downloads::table) .values(( @@ -232,33 +232,33 @@ mod test { version_downloads::date.eq(date(now)), version_downloads::processed.eq(false), )) - .execute(&conn) + .execute(conn) .unwrap(); insert_into(version_downloads::table) .values(( version_downloads::version_id.eq(version.id), version_downloads::date.eq(date(now - 1.day())), )) - .execute(&conn) + .execute(conn) .unwrap(); - let version_before: Version = versions::table.find(version.id).first(&conn).unwrap(); + let version_before: Version = versions::table.find(version.id).first(conn).unwrap(); let krate_before: Crate = Crate::all() .filter(crates::id.eq(krate.id)) - .first(&conn) + .first(conn) .unwrap(); - super::update(&conn).unwrap(); - let version2: Version = versions::table.find(version.id).first(&conn).unwrap(); + super::update(conn).unwrap(); + let version2: Version = versions::table.find(version.id).first(conn).unwrap(); assert_eq!(version2.downloads, 2); assert_eq!(version2.updated_at, version_before.updated_at); let krate2: Crate = Crate::all() .filter(crates::id.eq(krate.id)) - .first(&conn) + .first(conn) .unwrap(); assert_eq!(krate2.downloads, 2); assert_eq!(krate2.updated_at, krate_before.updated_at); - super::update(&conn).unwrap(); - let version3: Version = versions::table.find(version.id).first(&conn).unwrap(); + super::update(conn).unwrap(); + let version3: Version = versions::table.find(version.id).first(conn).unwrap(); assert_eq!(version3.downloads, 2); } @@ -267,16 +267,16 @@ mod test { use diesel::dsl::*; use diesel::update; - let conn = crate::db::test_conn(); - let user = user(&conn); - let (_, version) = crate_and_version(&conn, user.id); + let conn = &mut crate::db::test_conn(); + let user = user(conn); + let (_, version) = crate_and_version(conn, user.id); update(versions::table) .set(versions::updated_at.eq(now - 2.days())) - .execute(&conn) + .execute(conn) .unwrap(); update(crates::table) .set(crates::updated_at.eq(now - 2.days())) - .execute(&conn) + .execute(conn) .unwrap(); insert_into(version_downloads::table) .values(( @@ -286,16 +286,16 @@ mod test { version_downloads::date.eq(date(now - 2.days())), version_downloads::processed.eq(false), )) - .execute(&conn) + .execute(conn) .unwrap(); - super::update(&conn).unwrap(); + super::update(conn).unwrap(); let versions_changed = versions::table .select(versions::updated_at.ne(now - 2.days())) - .get_result(&conn); + .get_result(conn); let crates_changed = crates::table .select(crates::updated_at.ne(now - 2.days())) - .get_result(&conn); + .get_result(conn); assert_eq!(Ok(false), versions_changed); assert_eq!(Ok(false), crates_changed); }
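
Every source file in this change follows the same mechanical recipe: connections move from `&PgConnection` to `&mut PgConnection`, `transaction` closures receive the connection as an argument instead of capturing the outer binding, and bare attributes such as `#[table_name]` and `#[sql_type]` move into the `#[diesel(...)]` namespace. A minimal before/after sketch of that recipe on a hypothetical `users` table (not taken from this diff):

    use diesel::pg::PgConnection;
    use diesel::prelude::*;

    diesel::table! {
        users (id) {
            id -> Integer,
            name -> Text,
        }
    }

    #[derive(Insertable)]
    #[diesel(table_name = users)] // Diesel 1.4: #[table_name = "users"]
    struct NewUser<'a> {
        name: &'a str,
    }

    // Diesel 1.4: fn create(conn: &PgConnection, ...) with conn.transaction(|| ...)
    fn create(conn: &mut PgConnection, name: &str) -> QueryResult<usize> {
        conn.transaction(|conn| {
            diesel::insert_into(users::table)
                .values(&NewUser { name })
                .execute(conn)
        })
    }
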