diff --git a/Cargo.lock b/Cargo.lock
index ddfb69a32a7..cb00af476b4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -346,6 +346,7 @@ dependencies = [
  "serde",
  "serde_json",
  "tempfile",
+ "tracing",
  "url",
 ]
 
diff --git a/cargo-registry-index/Cargo.toml b/cargo-registry-index/Cargo.toml
index 350214ba846..69f1249da9a 100644
--- a/cargo-registry-index/Cargo.toml
+++ b/cargo-registry-index/Cargo.toml
@@ -20,5 +20,6 @@ dotenv = "=0.15.0"
 git2 = "=0.15.0"
 serde = { version = "=1.0.147", features = ["derive"] }
 tempfile = "=3.3.0"
+tracing = "=0.1.37"
 url = "=2.3.1"
 serde_json = { version = "=1.0.89", optional = true }
diff --git a/cargo-registry-index/lib.rs b/cargo-registry-index/lib.rs
index e46624b1ef6..5ee4a18bc05 100644
--- a/cargo-registry-index/lib.rs
+++ b/cargo-registry-index/lib.rs
@@ -1,5 +1,7 @@
 #[macro_use]
 extern crate serde;
+#[macro_use]
+extern crate tracing;
 
 #[cfg(feature = "testing")]
 pub mod testing;
@@ -177,10 +179,8 @@ impl RepositoryConfig {
         match (username, password, http_url, ssh_key, ssh_url) {
             (extra_user, extra_pass, extra_http_url, Ok(encoded_key), Ok(ssh_url)) => {
                 if let (Ok(_), Ok(_), Ok(_)) = (extra_user, extra_pass, extra_http_url) {
-                    println!(
-                        "warning: both http and ssh credentials to authenticate with git are set"
-                    );
-                    println!("note: ssh credentials will take precedence over the http ones");
+                    warn!("both http and ssh credentials to authenticate with git are set");
+                    info!("note: ssh credentials will take precedence over the http ones");
                 }
 
                 let index_location =
@@ -439,13 +439,13 @@ impl Repository {
     /// This function also prints the commit message and a success or failure
     /// message to the console.
     pub fn commit_and_push(&self, message: &str, modified_file: &Path) -> anyhow::Result<()> {
-        println!("Committing and pushing \"{message}\"");
+        info!("Committing and pushing \"{message}\"");
 
         let relative_path = modified_file.strip_prefix(self.checkout_path.path())?;
         self.perform_commit_and_push(message, relative_path)
-            .map(|_| println!("Commit and push finished for \"{message}\""))
+            .map(|_| info!("Commit and push finished for \"{message}\""))
             .map_err(|err| {
-                eprintln!("Commit and push for \"{message}\" errored: {err}");
+                error!(?err, "Commit and push for \"{message}\" errored");
                 err
             })
     }
@@ -467,7 +467,7 @@ impl Repository {
         let head = self.head_oid()?;
 
         if head != original_head {
-            println!("Resetting index from {original_head} to {head}");
+            info!("Resetting index from {original_head} to {head}");
         }
 
         let obj = self.repository.find_object(head, None)?;
diff --git a/src/admin/migrate.rs b/src/admin/migrate.rs
index 4549304d928..89bf334c136 100644
--- a/src/admin/migrate.rs
+++ b/src/admin/migrate.rs
@@ -23,7 +23,7 @@ pub fn run(_opts: Opts) -> Result<(), Error> {
         // TODO: Check `any_pending_migrations()` with a read-only connection and error if true.
         // It looks like this requires changes upstream to make this pub in `migration_macros`.
 
-        println!("==> Skipping migrations and category sync (read-only mode)");
+        warn!("Skipping migrations and category sync (read-only mode)");
 
         // The service is undergoing maintenance or mitigating an outage.
         // Exit with success to ensure configuration changes can be made.
@@ -34,10 +34,10 @@ pub fn run(_opts: Opts) -> Result<(), Error> {
     // The primary is online, access directly via `DATABASE_URL`.
     let conn = crate::db::oneoff_connection_with_config(&config)?;
 
-    println!("==> migrating the database");
+    info!("Migrating the database");
     embedded_migrations::run_with_output(&conn, &mut std::io::stdout())?;
 
-    println!("==> synchronizing crate categories");
+    info!("Synchronizing crate categories");
     crate::boot::categories::sync_with_connection(CATEGORIES_TOML, &conn).unwrap();
 
     Ok(())
diff --git a/src/bin/background-worker.rs b/src/bin/background-worker.rs
index fdfce814562..8f503fc48b6 100644
--- a/src/bin/background-worker.rs
+++ b/src/bin/background-worker.rs
@@ -12,6 +12,9 @@
 
 #![warn(clippy::all, rust_2018_idioms)]
 
+#[macro_use]
+extern crate tracing;
+
 use cargo_registry::config;
 use cargo_registry::worker::cloudfront::CloudFront;
 use cargo_registry::{background_jobs::*, db};
@@ -23,19 +26,19 @@ use std::thread::sleep;
 use std::time::Duration;
 
 fn main() {
-    println!("Booting runner");
-
     let _sentry = cargo_registry::sentry::init();
 
     // Initialize logging
     cargo_registry::util::tracing::init();
 
+    info!("Booting runner");
+
     let config = config::Server::default();
     let uploader = config.base.uploader();
 
     if config.db.are_all_read_only() {
         loop {
-            println!(
+            warn!(
                 "Cannot run background jobs with a read-only pool. Please scale background_worker \
                  to 0 processes until the leader database is available."
             );
@@ -50,13 +53,13 @@ fn main() {
         .parse()
         .expect("Invalid value for `BACKGROUND_JOB_TIMEOUT`");
 
-    println!("Cloning index");
+    info!("Cloning index");
 
     let repository_config = RepositoryConfig::from_environment();
     let repository = Arc::new(Mutex::new(
         Repository::open(&repository_config).expect("Failed to clone index"),
     ));
-    println!("Index cloned");
+    info!("Index cloned");
 
     let cloudfront = CloudFront::from_environment();
 
@@ -79,7 +82,7 @@ fn main() {
     };
 
     let mut runner = build_runner();
-    println!("Runner booted, running jobs");
+    info!("Runner booted, running jobs");
 
     let mut failure_count = 0;
 
@@ -87,10 +90,7 @@ fn main() {
         if let Err(e) = runner.run_all_pending_jobs() {
             failure_count += 1;
             if failure_count < 5 {
-                eprintln!(
-                    "Error running jobs (n = {}) -- retrying: {:?}",
-                    failure_count, e,
-                );
+                warn!(?failure_count, err = ?e, "Error running jobs -- retrying");
                 runner = build_runner();
             } else {
                 panic!("Failed to begin running jobs 5 times. Restarting the process");
diff --git a/src/bin/server.rs b/src/bin/server.rs
index fc14e9a5294..44762568119 100644
--- a/src/bin/server.rs
+++ b/src/bin/server.rs
@@ -1,5 +1,8 @@
 #![warn(clippy::all, rust_2018_idioms)]
 
+#[macro_use]
+extern crate tracing;
+
 use cargo_registry::{env_optional, metrics::LogEncoder, util::errors::AppResult, App, Env};
 
 use std::{fs::File, process::Command, sync::Arc, time::Duration};
@@ -8,7 +11,6 @@ use futures_util::future::FutureExt;
 use prometheus::Encoder;
 use reqwest::blocking::Client;
 use std::io::{self, Write};
-use tokio::io::AsyncWriteExt;
 use tokio::signal::unix::{signal, SignalKind};
 
 const CORE_THREADS: usize = 4;
@@ -85,10 +87,8 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
                 _ = sig_int.recv().fuse() => {},
                 _ = sig_term.recv().fuse() => {},
             };
-            tokio::io::stdout()
-                .write_all(b"Starting graceful shutdown\n")
-                .await
-                .ok();
+
+            info!("Starting graceful shutdown");
         });
 
         Ok::<_, io::Error>((addr, server))
@@ -96,7 +96,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
 
     // Do not change this line! Removing the line or changing its contents in any way will break
    // the test suite :)
-    println!("Listening at http://{addr}");
+    info!("Listening at http://{addr}");
 
     // Creating this file tells heroku to tell nginx that the application is ready
     // to receive traffic.
@@ -106,7 +106,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     } else {
         "/tmp/app-initialized"
     };
-    println!("Writing to {path}");
+    info!("Writing to {path}");
     File::create(path).unwrap();
 
     // Launch nginx via the Heroku nginx buildpack
@@ -120,13 +120,13 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     // Block the main thread until the server has shutdown
     rt.block_on(server)?;
 
-    println!("Persisting remaining downloads counters");
+    info!("Persisting remaining downloads counters");
     match app.downloads_counter.persist_all_shards(&app) {
         Ok(stats) => stats.log(),
-        Err(err) => println!("downloads_counter error: {err}"),
+        Err(err) => error!(?err, "downloads_counter error"),
     }
 
-    println!("Server has gracefully shutdown!");
+    info!("Server has gracefully shutdown!");
 
     Ok(())
 }
@@ -140,7 +140,7 @@ fn downloads_counter_thread(app: Arc<App>) {
 
         match app.downloads_counter.persist_next_shard(&app) {
             Ok(stats) => stats.log(),
-            Err(err) => println!("downloads_counter error: {err}"),
+            Err(err) => error!(?err, "downloads_counter error"),
         }
     });
 }
@@ -155,7 +155,7 @@ fn log_instance_metrics_thread(app: Arc<App>) {
     std::thread::spawn(move || loop {
         if let Err(err) = log_instance_metrics_inner(&app) {
-            eprintln!("log_instance_metrics error: {err}");
+            error!(?err, "log_instance_metrics error");
         }
 
         std::thread::sleep(interval);
     });
diff --git a/src/config/base.rs b/src/config/base.rs
index 99720bc610e..594ff2deefe 100644
--- a/src/config/base.rs
+++ b/src/config/base.rs
@@ -38,13 +38,13 @@ impl Base {
             // for the related S3 environment variables and configure the app to upload to
             // and read from S3 like production does. All values except for bucket are
             // optional, like production read-only mirrors.
-            println!("Using S3 uploader");
+            info!("Using S3 uploader");
             Self::s3_maybe_read_only()
         } else {
             // If we don't set the `S3_BUCKET` variable, we'll use a development-only
             // uploader that makes it possible to run and publish to a locally-running
             // crates.io instance without needing to set up an account and a bucket in S3.
-            println!(
+            info!(
                 "Using local uploader, crate files will be in the local_uploads directory"
             );
             Uploader::Local
diff --git a/src/middleware.rs b/src/middleware.rs
index de08bfb0b77..5f4e3c7994a 100644
--- a/src/middleware.rs
+++ b/src/middleware.rs
@@ -90,12 +90,10 @@ pub fn build_middleware(app: Arc<App>, endpoints: RouteBuilder) -> MiddlewareBuilder {
     if let Ok(capacity) = env::var("DB_PRIMARY_POOL_SIZE") {
         if let Ok(capacity) = capacity.parse() {
             if capacity >= 10 {
-                println!("Enabling BalanceCapacity middleware with {capacity} pool capacity");
+                info!(?capacity, "Enabling BalanceCapacity middleware");
                 m.around(balance_capacity::BalanceCapacity::new(capacity))
             } else {
-                println!(
-                    "BalanceCapacity middleware not enabled. DB_PRIMARY_POOL_SIZE is too low."
-                );
+                info!("BalanceCapacity middleware not enabled. DB_PRIMARY_POOL_SIZE is too low.");
             }
         }
     }
diff --git a/src/tests/server_binary.rs b/src/tests/server_binary.rs
index 77e6181e986..b3076783186 100644
--- a/src/tests/server_binary.rs
+++ b/src/tests/server_binary.rs
@@ -121,6 +121,7 @@ impl ServerBin {
         let mut process = Command::new(env!("CARGO_BIN_EXE_server"))
             .env_clear()
             .envs(self.env.into_iter())
+            .env("RUST_LOG", "info")
             .stdout(Stdio::piped())
             .stderr(Stdio::piped())
             .spawn()?;
@@ -197,7 +198,10 @@ where
         // If we expect the port number to be logged into this stream, look for it and send it
         // over the channel as soon as it's found.
        if let Some(port_send) = &port_send {
-            if let Some(url) = line.strip_prefix("Listening at ") {
+            let pattern = "Listening at ";
+            if let Some(idx) = line.find(pattern) {
+                let start = idx + pattern.len();
+                let url = &line[start..];
                 let url = Url::parse(url).unwrap();
                 let port = url.port().unwrap();
                 port_send