diff --git a/Cargo.toml b/Cargo.toml
index 65c4eb8825..71ad8cf727 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -11,6 +11,7 @@ members = [
     "sqlx-postgres",
     "sqlx-sqlite",
     "examples/mysql/todos",
+    "examples/postgres/axum-multi-tenant",
     "examples/postgres/axum-social-with-tests",
     "examples/postgres/chat",
     "examples/postgres/files",
@@ -23,6 +24,7 @@ members = [
     "examples/postgres/todos",
     "examples/postgres/transaction",
     "examples/sqlite/todos",
+    "examples/sqlite/extension",
 ]
 
 [workspace.package]
diff --git a/examples/postgres/axum-multi-tenant/Cargo.toml b/examples/postgres/axum-multi-tenant/Cargo.toml
new file mode 100644
index 0000000000..c35df3575e
--- /dev/null
+++ b/examples/postgres/axum-multi-tenant/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "multi-tenant"
+version.workspace = true
+license.workspace = true
+edition.workspace = true
+repository.workspace = true
+keywords.workspace = true
+categories.workspace = true
+authors.workspace = true
+
+[dependencies]
+accounts = { path = "accounts" }
+payments = { path = "payments" }
+
+sqlx = { path = "../../..", features = ["runtime-tokio", "postgres"] }
+
+[lints]
+workspace = true
diff --git a/examples/postgres/axum-multi-tenant/README.md b/examples/postgres/axum-multi-tenant/README.md
new file mode 100644
index 0000000000..d38f7f3ea5
--- /dev/null
+++ b/examples/postgres/axum-multi-tenant/README.md
@@ -0,0 +1,11 @@
+# Axum App with Multi-tenant Database
+
+This example project involves three crates, each owning a different schema in one database
+and each with its own set of migrations.
+
+* The main crate, an Axum app.
+  * Owns the `public` schema (tables are referenced unqualified).
+* `accounts`: a subcrate simulating a reusable account-management crate.
+  * Owns schema `accounts`.
+* `payments`: a subcrate simulating a wrapper for a payments API.
+  * Owns schema `payments`.
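The example's own `src/main.rs` is still a `println!` placeholder (it appears further down in this diff), so the wiring is not shown here. A minimal sketch of what startup could look like — assuming the main crate also adds `tokio` (with the `macros` and `rt-multi-thread` features) and `anyhow`, which are not in its `Cargo.toml` yet, and using the `AccountsManager` API from the `accounts` crate below:

```rust
use accounts::AccountsManager;
use sqlx::postgres::PgPoolOptions;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let pool = PgPoolOptions::new()
        .max_connections(5)
        .connect(&std::env::var("DATABASE_URL")?)
        .await?;

    // The main crate would apply its own `public`-schema migrations here;
    // each subcrate applies its own migrations to its own schema during setup.
    let mut conn = pool.acquire().await?;

    // `AccountsManager::new` runs the `accounts` crate's migrations against the
    // `accounts` schema; the hashing-thread limit of 4 is an arbitrary choice.
    let accounts = AccountsManager::new(&mut conn, 4).await?;
    drop(conn);

    // ... build the Axum router here, sharing `pool` and `accounts` as state ...
    let _ = (pool, accounts);
    Ok(())
}
```

The `payments` crate is still a stub in this diff, so it is left out of the sketch.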
diff --git a/examples/postgres/axum-multi-tenant/accounts/Cargo.toml b/examples/postgres/axum-multi-tenant/accounts/Cargo.toml
new file mode 100644
index 0000000000..485ba8eb73
--- /dev/null
+++ b/examples/postgres/axum-multi-tenant/accounts/Cargo.toml
@@ -0,0 +1,13 @@
+[package]
+name = "accounts"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+sqlx = { workspace = true, features = ["postgres", "time", "uuid"] }
+argon2 = { version = "0.5.3", features = ["password-hash", "std"] }
+tokio = { version = "1", features = ["rt", "sync"] }
+
+uuid = "1"
+thiserror = "1"
+rand = "0.8"
diff --git a/examples/postgres/axum-multi-tenant/accounts/migrations/01_setup.sql b/examples/postgres/axum-multi-tenant/accounts/migrations/01_setup.sql
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/examples/postgres/axum-multi-tenant/accounts/migrations/02_account.sql b/examples/postgres/axum-multi-tenant/accounts/migrations/02_account.sql
new file mode 100644
index 0000000000..91b9cf82e0
--- /dev/null
+++ b/examples/postgres/axum-multi-tenant/accounts/migrations/02_account.sql
@@ -0,0 +1,8 @@
+create table account
+(
+    account_id uuid primary key default gen_random_uuid(),
+    email text unique not null,
+    password_hash text not null,
+    created_at timestamptz not null default now(),
+    updated_at timestamptz
+);
diff --git a/examples/postgres/axum-multi-tenant/accounts/sqlx.toml b/examples/postgres/axum-multi-tenant/accounts/sqlx.toml
new file mode 100644
index 0000000000..45042f1333
--- /dev/null
+++ b/examples/postgres/axum-multi-tenant/accounts/sqlx.toml
@@ -0,0 +1,6 @@
+[migrate]
+create-schemas = ["accounts"]
+migrations-table = "accounts._sqlx_migrations"
+
+[macros.table-overrides.'accounts.account']
+'account_id' = "crate::AccountId"
diff --git a/examples/postgres/axum-multi-tenant/accounts/src/lib.rs b/examples/postgres/axum-multi-tenant/accounts/src/lib.rs
new file mode 100644
index 0000000000..f015af3d40
--- /dev/null
+++ b/examples/postgres/axum-multi-tenant/accounts/src/lib.rs
@@ -0,0 +1,133 @@
+use std::sync::Arc;
+
+use argon2::{password_hash, Argon2, PasswordHasher, PasswordVerifier};
+
+use password_hash::PasswordHashString;
+
+use sqlx::{PgConnection, PgTransaction};
+use sqlx::types::Text;
+
+use uuid::Uuid;
+
+use tokio::sync::Semaphore;
+
+#[derive(sqlx::Type)]
+#[sqlx(transparent)]
+pub struct AccountId(pub Uuid);
+
+pub struct AccountsManager {
+    hashing_semaphore: Arc<Semaphore>,
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum CreateError {
+    #[error("email in-use")]
+    EmailInUse,
+    #[error(transparent)]
+    General(#[from] GeneralError),
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum AuthenticateError {
+    #[error("unknown email")]
+    UnknownEmail,
+    #[error("invalid password")]
+    InvalidPassword,
+    #[error(transparent)]
+    General(#[from] GeneralError),
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum GeneralError {
+    #[error("database error")]
+    Sqlx(#[from] sqlx::Error),
+    #[error("error running migrations")]
+    Migrate(#[from] sqlx::migrate::MigrateError),
+    #[error("error hashing or verifying password")]
+    PasswordHash(#[from] argon2::password_hash::Error),
+    #[error("background task failed")]
+    Task(#[from] tokio::task::JoinError),
+}
+
+impl AccountsManager {
+    pub async fn new(
+        conn: &mut PgConnection,
+        max_hashing_threads: usize,
+    ) -> Result<Self, GeneralError> {
+        sqlx::migrate!().run(conn).await?;
+
+        Ok(AccountsManager {
+            hashing_semaphore: Arc::new(Semaphore::new(max_hashing_threads)),
+        })
+    }
+
+    async fn hash_password(&self, password: String) -> Result<PasswordHashString, GeneralError> {
+        // Acquire an owned permit so it can be moved into the blocking task.
+        let guard = self
+            .hashing_semaphore
+            .clone()
+            .acquire_owned()
+            .await
+            .expect("BUG: this semaphore should not be closed");
+
+        // We transfer ownership of the permit to the blocking task and back to
+        // ensure Tokio doesn't spawn excess blocking threads.
+        let (_guard, res) = tokio::task::spawn_blocking(move || {
+            let salt = argon2::password_hash::SaltString::generate(rand::thread_rng());
+            (
+                guard,
+                Argon2::default()
+                    .hash_password(password.as_bytes(), &salt)
+                    // Serialize inside the closure: the parsed hash borrows `salt`.
+                    .map(|hash| hash.serialize()),
+            )
+        })
+        .await?;
+
+        Ok(res?)
+    }
+
+    async fn verify_password(
+        &self,
+        password: String,
+        hash: PasswordHashString,
+    ) -> Result<(), AuthenticateError> {
+        let guard = self
+            .hashing_semaphore
+            .clone()
+            .acquire_owned()
+            .await
+            .expect("BUG: this semaphore should not be closed");
+
+        let (_guard, res) = tokio::task::spawn_blocking(move || {
+            (
+                guard,
+                Argon2::default().verify_password(password.as_bytes(), &hash.password_hash()),
+            )
+        })
+        .await
+        .map_err(GeneralError::from)?;
+
+        if let Err(password_hash::Error::Password) = res {
+            return Err(AuthenticateError::InvalidPassword);
+        }
+
+        res.map_err(GeneralError::from)?;
+
+        Ok(())
+    }
+
+    pub async fn create(
+        &self,
+        txn: &mut PgTransaction<'_>,
+        email: &str,
+        password: String,
+    ) -> Result<AccountId, CreateError> {
+        // Hash password whether the account exists or not to make it harder
+        // to tell the difference in the timing.
+        let hash = self.hash_password(password).await?;
+
+        // language=PostgreSQL
+        let record = sqlx::query!(
+            "insert into accounts.account(email, password_hash) \
+             values ($1, $2) \
+             returning account_id",
+            email,
+            Text(hash) as Text<PasswordHashString>,
+        )
+        .fetch_one(&mut **txn)
+        .await
+        .map_err(|e| {
+            // A unique violation on `email` means the account already exists.
+            if e.as_database_error().and_then(|dbe| dbe.constraint()) == Some("account_email_key") {
+                CreateError::EmailInUse
+            } else {
+                GeneralError::from(e).into()
+            }
+        })?;
+
+        Ok(record.account_id)
+    }
+
+    pub async fn authenticate(
+        &self,
+        conn: &mut PgConnection,
+        email: &str,
+        password: String,
+    ) -> Result<AccountId, AuthenticateError> {
+        let maybe_account = sqlx::query!(
+            "select account_id, password_hash as \"password_hash: Text<PasswordHashString>\" \
+             from accounts.account \
+             where email = $1",
+            email
+        )
+        .fetch_optional(&mut *conn)
+        .await
+        .map_err(GeneralError::from)?;
+
+        let Some(account) = maybe_account else {
+            // Hash the password whether the account exists or not to hide the difference in timing.
+            self.hash_password(password).await?;
+            return Err(AuthenticateError::UnknownEmail);
+        };
+
+        self.verify_password(password, account.password_hash.0).await?;
+
+        Ok(account.account_id)
+    }
+}
diff --git a/examples/postgres/axum-multi-tenant/payments/Cargo.toml b/examples/postgres/axum-multi-tenant/payments/Cargo.toml
new file mode 100644
index 0000000000..0a2485955b
--- /dev/null
+++ b/examples/postgres/axum-multi-tenant/payments/Cargo.toml
@@ -0,0 +1,7 @@
+[package]
+name = "payments"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+sqlx = { workspace = true, features = ["postgres", "time"] }
diff --git a/examples/postgres/axum-multi-tenant/payments/src/lib.rs b/examples/postgres/axum-multi-tenant/payments/src/lib.rs
new file mode 100644
index 0000000000..7d12d9af81
--- /dev/null
+++ b/examples/postgres/axum-multi-tenant/payments/src/lib.rs
@@ -0,0 +1,14 @@
+pub fn add(left: usize, right: usize) -> usize {
+    left + right
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn it_works() {
+        let result = add(2, 2);
+        assert_eq!(result, 4);
+    }
+}
diff --git a/examples/postgres/axum-multi-tenant/src/main.rs b/examples/postgres/axum-multi-tenant/src/main.rs
new file mode 100644
index 0000000000..e7a11a969c
--- /dev/null
+++ b/examples/postgres/axum-multi-tenant/src/main.rs
@@ -0,0 +1,3 @@
+fn main() {
+    println!("Hello, world!");
+}
diff --git a/examples/sqlite/extension/Cargo.toml b/examples/sqlite/extension/Cargo.toml
new file mode 100644
index 0000000000..bf20add4b3
--- /dev/null
+++ b/examples/sqlite/extension/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "sqlx-example-sqlite-extension"
+version = "0.1.0"
+license.workspace = true
+edition.workspace = true
+repository.workspace = true
+keywords.workspace = true
+categories.workspace = true
+authors.workspace = true
+
+[dependencies]
+sqlx = { path = "../../../", features = ["sqlite", "runtime-tokio", "tls-native-tls"] }
+tokio = { version = "1.20.0", features = ["rt", "macros"] }
+anyhow = "1.0"
+
+[lints]
+workspace = true
diff --git a/examples/sqlite/extension/download-extension.sh b/examples/sqlite/extension/download-extension.sh
new file mode 100755
index 0000000000..ce7f23a486
--- /dev/null
+++ b/examples/sqlite/extension/download-extension.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+# This grabs a pre-compiled version of the extension used in this
+# example, and stores it in a temporary directory. That's a bit
+# unusual. Normally, any extensions you need will be installed into a
+# directory on the library search path, either by using the system
+# package manager or by compiling and installing it yourself.
+
+mkdir /tmp/sqlite3-lib && wget -O /tmp/sqlite3-lib/ipaddr.so https://github.com/nalgeon/sqlean/releases/download/0.15.2/ipaddr.so
diff --git a/examples/sqlite/extension/migrations/20250203094951_addresses.sql b/examples/sqlite/extension/migrations/20250203094951_addresses.sql
new file mode 100644
index 0000000000..af38213d5f
--- /dev/null
+++ b/examples/sqlite/extension/migrations/20250203094951_addresses.sql
@@ -0,0 +1,25 @@
+create table addresses (address text, family integer);
+
+-- The `ipfamily` function is provided by the
+-- [ipaddr](https://github.com/nalgeon/sqlean/blob/main/docs/ipaddr.md)
+-- sqlite extension, and so this migration cannot run if that
+-- extension is not loaded.
+insert into addresses (address, family) values
+  ('fd04:3d29:9f41::1', ipfamily('fd04:3d29:9f41::1')),
+  ('10.0.0.1', ipfamily('10.0.0.1')),
+  ('10.0.0.2', ipfamily('10.0.0.2')),
+  ('fd04:3d29:9f41::2', ipfamily('fd04:3d29:9f41::2')),
+  ('fd04:3d29:9f41::3', ipfamily('fd04:3d29:9f41::3')),
+  ('10.0.0.3', ipfamily('10.0.0.3')),
+  ('fd04:3d29:9f41::4', ipfamily('fd04:3d29:9f41::4')),
+  ('fd04:3d29:9f41::5', ipfamily('fd04:3d29:9f41::5')),
+  ('fd04:3d29:9f41::6', ipfamily('fd04:3d29:9f41::6')),
+  ('10.0.0.4', ipfamily('10.0.0.4')),
+  ('10.0.0.5', ipfamily('10.0.0.5')),
+  ('10.0.0.6', ipfamily('10.0.0.6')),
+  ('10.0.0.7', ipfamily('10.0.0.7')),
+  ('fd04:3d29:9f41::7', ipfamily('fd04:3d29:9f41::7')),
+  ('fd04:3d29:9f41::8', ipfamily('fd04:3d29:9f41::8')),
+  ('10.0.0.8', ipfamily('10.0.0.8')),
+  ('fd04:3d29:9f41::9', ipfamily('fd04:3d29:9f41::9')),
+  ('10.0.0.9', ipfamily('10.0.0.9'));
diff --git a/examples/sqlite/extension/sqlx.toml b/examples/sqlite/extension/sqlx.toml
new file mode 100644
index 0000000000..77f844642f
--- /dev/null
+++ b/examples/sqlite/extension/sqlx.toml
@@ -0,0 +1,12 @@
+[common.drivers.sqlite]
+# Including the full path to the extension is somewhat unusual,
+# because normally an extension will be installed in a standard
+# directory which is part of the library search path. If that were the
+# case here, the load-extensions value could just be `["ipaddr"]`.
+#
+# When the extension file is installed in a non-standard location, as
+# in this example, there are two options:
+# * Provide the full path to the extension, as seen below.
+# * Add the non-standard location to the library search path, which on
+#   Linux means adding it to the LD_LIBRARY_PATH environment variable.
+load-extensions = ["/tmp/sqlite3-lib/ipaddr"]
\ No newline at end of file
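If the extension were installed somewhere on the library search path instead, both halves of this example would get shorter: the `load-extensions` value above could just be `["ipaddr"]`, and the runtime options in `src/main.rs` below could drop the path too. A small sketch of that by-name form (not part of this example; it assumes `ipaddr.so` is already in a directory the dynamic loader searches):

```rust
use std::str::FromStr;

use sqlx::sqlite::SqliteConnectOptions;

fn options_from_env() -> anyhow::Result<SqliteConnectOptions> {
    // With the extension on the library search path, no filesystem path is
    // needed; SQLite resolves the bare name just as the CLI and macros would.
    Ok(SqliteConnectOptions::from_str(&std::env::var("DATABASE_URL")?)?
        .extension("ipaddr"))
}
```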
diff --git a/examples/sqlite/extension/src/main.rs b/examples/sqlite/extension/src/main.rs
new file mode 100644
index 0000000000..e171e9a6d0
--- /dev/null
+++ b/examples/sqlite/extension/src/main.rs
@@ -0,0 +1,33 @@
+use std::str::FromStr;
+
+use sqlx::{query, sqlite::{SqlitePool, SqliteConnectOptions}};
+
+#[tokio::main(flavor = "current_thread")]
+async fn main() -> anyhow::Result<()> {
+    let opts = SqliteConnectOptions::from_str(&std::env::var("DATABASE_URL")?)?
+        // The sqlx.toml file controls loading extensions for the CLI
+        // and for the query checking macros, *not* for the
+        // application while it's running. Thus, if we want the
+        // extension to be available during program execution, we need
+        // to load it.
+        //
+        // Note that while in this case the extension path is the same
+        // when checking the program (sqlx.toml) and when running it
+        // (here), this is not required. The runtime environment can
+        // be entirely different from the development one.
+        //
+        // The extension can be described with a full path, as seen
+        // here, but in many cases that will not be necessary. As long
+        // as the extension is installed in a directory on the library
+        // search path, it is sufficient to just provide the extension
+        // name, like "ipaddr".
+        .extension("/tmp/sqlite3-lib/ipaddr");
+
+    let db = SqlitePool::connect_with(opts).await?;
+
+    query!("insert into addresses (address, family) values (?1, ipfamily(?1))", "10.0.0.10").execute(&db).await?;
+
+    println!("Query which requires the extension was successfully executed.");
+
+    Ok(())
+}
diff --git a/examples/x.py b/examples/x.py
index 79f6fda1ba..aaf4170c77 100755
--- a/examples/x.py
+++ b/examples/x.py
@@ -85,3 +85,4 @@ def project(name, database=None, driver=None):
 project("mysql/todos", driver="mysql_8", database="todos")
 project("postgres/todos", driver="postgres_12", database="todos")
 project("sqlite/todos", driver="sqlite", database="todos.db")
+project("sqlite/extension", driver="sqlite", database="extension.db")
diff --git a/sqlx-cli/src/lib.rs b/sqlx-cli/src/lib.rs
index bb9f46ccc4..917faddd55 100644
--- a/sqlx-cli/src/lib.rs
+++ b/sqlx-cli/src/lib.rs
@@ -1,10 +1,12 @@
 use std::io;
+use std::path::PathBuf;
 use std::time::Duration;
 
 use futures::{Future, TryFutureExt};
 use sqlx::{AnyConnection, Connection};
 use tokio::{select, signal};
+use anyhow::Context;
 
 use crate::opt::{Command, ConnectOpts, DatabaseCommand, MigrateCommand};
@@ -188,7 +190,7 @@ async fn do_run(opt: Opt) -> anyhow::Result<()> {
 
 /// Attempt to connect to the database server, retrying up to `ops.connect_timeout`.
 async fn connect(opts: &ConnectOpts) -> anyhow::Result<AnyConnection> {
-    retry_connect_errors(opts, AnyConnection::connect).await
+    retry_connect_errors(opts, AnyConnection::connect_with_config).await
 }
 
 /// Attempt an operation that may return errors like `ConnectionRefused`,
@@ -230,3 +232,18 @@ where
     )
     .await
 }
+
+async fn config_from_current_dir() -> anyhow::Result<&'static Config> {
+    // Tokio does file I/O on a background task anyway
+    tokio::task::spawn_blocking(|| {
+        let path = PathBuf::from("sqlx.toml");
+
+        if path.exists() {
+            eprintln!("Found `sqlx.toml` in current directory; reading...");
+        }
+
+        Config::read_with_or_default(move || Ok(path))
+    })
+    .await
+    .context("unexpected error loading config")
+}
diff --git a/sqlx-cli/src/migrate.rs b/sqlx-cli/src/migrate.rs
index 45a38b202a..6002536c33 100644
--- a/sqlx-cli/src/migrate.rs
+++ b/sqlx-cli/src/migrate.rs
@@ -2,7 +2,9 @@ use crate::config::Config;
 use crate::opt::{AddMigrationOpts, ConnectOpts, MigrationSourceOpt};
 use anyhow::{bail, Context};
 use console::style;
-use sqlx::migrate::{AppliedMigration, Migrate, MigrateError, MigrationType, Migrator};
+use sqlx::migrate::{
+    AppliedMigration, Migrate, MigrateError, MigrationType, Migrator, ResolveWith,
+};
 use sqlx::Connection;
 use std::borrow::Cow;
 use std::collections::{HashMap, HashSet};
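The CLI's `connect()` now goes through `AnyConnection::connect_with_config`, which is added in `sqlx-core` just below. In effect it is a thin wrapper along the lines of this sketch (using only items that appear in this diff, and assuming the `Any` drivers are already installed, as they are in `sqlx-cli`):

```rust
use sqlx::any::{AnyConnectOptions, AnyConnection};

async fn connect_reading_sqlx_toml(url: &str) -> sqlx::Result<AnyConnection> {
    // Parse the URL as usual, then opt in to `sqlx.toml`-driven driver settings
    // (for example `common.drivers.sqlite.load-extensions`) before connecting.
    let opts = url.parse::<AnyConnectOptions>()?.allow_config_file();
    AnyConnection::connect_with(&opts).await
}
```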
diff --git a/sqlx-core/src/any/connection/mod.rs b/sqlx-core/src/any/connection/mod.rs
index 8cf8fc510c..509e8f5e93 100644
--- a/sqlx-core/src/any/connection/mod.rs
+++ b/sqlx-core/src/any/connection/mod.rs
@@ -40,6 +40,20 @@ impl AnyConnection {
         })
     }
 
+    /// UNSTABLE: for use with `sqlx-cli`
+    ///
+    /// Connect to the database, and instruct the nested driver to
+    /// read options from the sqlx.toml file as appropriate.
+    #[doc(hidden)]
+    pub fn connect_with_config(url: &str) -> BoxFuture<'static, Result<Self, Error>>
+    where
+        Self: Sized,
+    {
+        let options: Result<AnyConnectOptions, Error> = url.parse();
+
+        Box::pin(async move { Self::connect_with(&options?.allow_config_file()).await })
+    }
+
     pub(crate) fn connect_with_db(
         options: &AnyConnectOptions,
     ) -> BoxFuture<'_, crate::Result<Self>>
diff --git a/sqlx-core/src/any/options.rs b/sqlx-core/src/any/options.rs
index bb29d817c9..5ed68efec5 100644
--- a/sqlx-core/src/any/options.rs
+++ b/sqlx-core/src/any/options.rs
@@ -19,6 +19,7 @@ use url::Url;
 pub struct AnyConnectOptions {
     pub database_url: Url,
     pub log_settings: LogSettings,
+    pub enable_config: bool,
 }
 impl FromStr for AnyConnectOptions {
     type Err = Error;
@@ -29,6 +30,7 @@ impl FromStr for AnyConnectOptions {
                 .parse::<Url>()
                 .map_err(|e| Error::Configuration(e.into()))?,
             log_settings: LogSettings::default(),
+            enable_config: false,
         })
     }
 }
@@ -40,6 +42,7 @@ impl ConnectOptions for AnyConnectOptions {
         Ok(AnyConnectOptions {
             database_url: url.clone(),
             log_settings: LogSettings::default(),
+            enable_config: false,
         })
     }
 
@@ -63,3 +66,15 @@ impl ConnectOptions for AnyConnectOptions {
         self
     }
 }
+
+impl AnyConnectOptions {
+    /// UNSTABLE: for use with `sqlx-cli`
+    ///
+    /// Allow nested drivers to extract configuration information from
+    /// the sqlx.toml file.
+    #[doc(hidden)]
+    pub fn allow_config_file(mut self) -> Self {
+        self.enable_config = true;
+        self
+    }
+}
diff --git a/sqlx-core/src/config/common.rs b/sqlx-core/src/config/common.rs
index 2d5342d5b8..b3623d43fe 100644
--- a/sqlx-core/src/config/common.rs
+++ b/sqlx-core/src/config/common.rs
@@ -40,6 +40,14 @@ pub struct Config {
     /// The query macros used in `foo` will use `FOO_DATABASE_URL`,
     /// and the ones used in `bar` will use `BAR_DATABASE_URL`.
     pub database_url_var: Option<String>,
+
+    /// Settings for specific database drivers.
+    ///
+    /// These settings apply when checking queries, or when applying
+    /// migrations via `sqlx-cli`. These settings *do not* apply when
+    /// applying migrations via the macro, as that uses the run-time
+    /// database connection configured by the application.
+    pub drivers: Drivers,
 }
 
 impl Config {
@@ -47,3 +55,34 @@ impl Config {
         self.database_url_var.as_deref().unwrap_or("DATABASE_URL")
     }
 }
+
+/// Configuration for specific database drivers.
+#[derive(Debug, Default)]
+#[cfg_attr(
+    feature = "sqlx-toml",
+    derive(serde::Deserialize),
+    serde(default, rename_all = "kebab-case")
+)]
+pub struct Drivers {
+    /// Specify options for the SQLite driver.
+    pub sqlite: SQLite,
+}
+
+/// Configuration for the SQLite database driver.
+#[derive(Debug, Default)]
+#[cfg_attr(
+    feature = "sqlx-toml",
+    derive(serde::Deserialize),
+    serde(default, rename_all = "kebab-case")
+)]
+pub struct SQLite {
+    /// Specify extensions to load.
+    ///
+    /// # Example: Load the "uuid" and "vsv" extensions
+    /// `sqlx.toml`:
+    /// ```toml
+    /// [common.drivers.sqlite]
+    /// load-extensions = ["uuid", "vsv"]
+    /// ```
+    pub load_extensions: Vec<String>,
+}
diff --git a/sqlx-core/src/config/reference.toml b/sqlx-core/src/config/reference.toml
index 77833fb5a8..787c3456db 100644
--- a/sqlx-core/src/config/reference.toml
+++ b/sqlx-core/src/config/reference.toml
@@ -15,6 +15,12 @@
 # If not specified, defaults to `DATABASE_URL`
 database-url-var = "FOO_DATABASE_URL"
 
+[common.drivers.sqlite]
+# Load extensions into SQLite when running macros or migrations
+#
+# Defaults to an empty list, which has no effect.
+load-extensions = ["uuid", "vsv"] + ############################################################################################### # Configuration for the `query!()` family of macros. diff --git a/sqlx-macros-core/src/migrate.rs b/sqlx-macros-core/src/migrate.rs index cfc3394757..fbf9254653 100644 --- a/sqlx-macros-core/src/migrate.rs +++ b/sqlx-macros-core/src/migrate.rs @@ -135,6 +135,15 @@ pub fn expand_with_path(config: &Config, path: &Path) -> crate::Result TryFrom<&'a AnyConnectOptions> for SqliteConnectOptions { fn try_from(opts: &'a AnyConnectOptions) -> Result { let mut opts_out = SqliteConnectOptions::from_url(&opts.database_url)?; opts_out.log_settings = opts.log_settings.clone(); + + if opts.enable_config { + let config = sqlx_core::config::Config::from_crate(); + for extension in config.common.drivers.sqlite.load_extensions.iter() { + opts_out = opts_out.extension(extension); + } + } + Ok(opts_out) } } diff --git a/sqlx-sqlite/src/lib.rs b/sqlx-sqlite/src/lib.rs index e4a122b6bd..afbe639ab2 100644 --- a/sqlx-sqlite/src/lib.rs +++ b/sqlx-sqlite/src/lib.rs @@ -127,8 +127,15 @@ pub static CREATE_DB_WAL: AtomicBool = AtomicBool::new(true); /// UNSTABLE: for use by `sqlite-macros-core` only. #[doc(hidden)] pub fn describe_blocking(query: &str, database_url: &str) -> Result, Error> { - let opts: SqliteConnectOptions = database_url.parse()?; + let mut opts: SqliteConnectOptions = database_url.parse()?; + + let config = sqlx_core::config::Config::from_crate(); + for extension in config.common.drivers.sqlite.load_extensions.iter() { + opts = opts.extension(extension); + } + let params = EstablishParams::from_options(&opts)?; + let mut conn = params.establish()?; // Execute any ancillary `PRAGMA`s diff --git a/tests/docker.py b/tests/docker.py index b1b81b07fb..5e8c74fb1f 100644 --- a/tests/docker.py +++ b/tests/docker.py @@ -17,9 +17,10 @@ def start_database(driver, database, cwd): database = path.join(cwd, database) (base_path, ext) = path.splitext(database) new_database = f"{base_path}.test{ext}" - shutil.copy(database, new_database) + if path.exists(database): + shutil.copy(database, new_database) # short-circuit for sqlite - return f"sqlite://{path.join(cwd, new_database)}" + return f"sqlite://{path.join(cwd, new_database)}?mode=rwc" res = subprocess.run( ["docker-compose", "up", "-d", driver],