Fix CI after Rust 1.80, remove dead feature references (#3381)

* fix(ci): update workflows/examples.yml

* Removed archived `actions-rs` actions
* Upgraded `Swatinem/rust-cache`, `actions/checkout`, `actions/download-artifact`, `actions/upload-artifact`

* chore: deprecate `AnyKind`

* fix: remove dead references to DB features in `sqlx-core`

This reactivates a couple of tests that had stopped working.

* chore(ci): move sqlx-cli checks to their own workflow

* fix(ci): remove remaining `uses: actions-rs/cargo`

* fix warnings

* chore: add titles to sqlx-cli jobs

* fix warnings (2)

* fix error in pool example

* fix warnings (3)

* fix query_builder test

* fix: don't run CLI tests on Windows

* chore: upgrade `rust-toolchain` to 1.80

* fix(postgres): fix missing Tokio specialization in `PgCopyIn`

Caught by the new `unexpected_cfgs` lint.

* fix new warnings
This commit is contained in:
Austin Bonander 2024-07-26 23:15:32 -07:00 committed by GitHub
parent 1e526a2bbf
commit 6651d2df72
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
22 changed files with 334 additions and 352 deletions

View file

@ -12,31 +12,24 @@ jobs:
name: Build SQLx CLI
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- name: Use latest Rust
run: rustup override set stable
- uses: Swatinem/rust-cache@v1
- uses: Swatinem/rust-cache@v2
with:
key: sqlx-cli
- uses: actions-rs/cargo@v1
with:
command: build
args: >
- run: >
cargo build
-p sqlx-cli
--bin sqlx
--release
--no-default-features
--features mysql,postgres,sqlite
env:
RUSTFLAGS: -D warnings
- uses: actions/upload-artifact@v3
- uses: actions/upload-artifact@v4
with:
name: sqlx-cli
path: target/release/sqlx
@ -56,7 +49,7 @@ jobs:
steps:
- name: Get SQLx-CLI
uses: actions/download-artifact@v3
uses: actions/download-artifact@v4
with:
name: sqlx-cli
# $HOME is interpreted differently by the shell
@ -68,15 +61,9 @@ jobs:
echo /home/runner/.local/bin >> $GITHUB_PATH
sleep 10
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v1
- uses: Swatinem/rust-cache@v2
with:
key: mysql-examples
@ -87,13 +74,9 @@ jobs:
run: sqlx db setup
- name: Todos (Run)
uses: actions-rs/cargo@v1
env:
DATABASE_URL: mysql://root:password@localhost:3306/todos?ssl-mode=disabled
with:
# TODO: test full CLI
command: run
args: -p sqlx-example-mysql-todos
run: cargo run -p sqlx-example-mysql-todos
postgres:
name: PostgreSQL Examples
@ -110,7 +93,7 @@ jobs:
steps:
- name: Get SQLx-CLI
uses: actions/download-artifact@v3
uses: actions/download-artifact@v4
with:
name: sqlx-cli
path: /home/runner/.local/bin
@ -121,15 +104,9 @@ jobs:
echo $HOME/.local/bin >> $GITHUB_PATH
sleep 10
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v1
- uses: Swatinem/rust-cache@v2
with:
key: pg-examples
@ -140,28 +117,19 @@ jobs:
run: sqlx db setup
- name: Axum Social with Tests (Check)
uses: actions-rs/cargo@v1
env:
DATABASE_URL: postgres://postgres:password@localhost:5432/axum-social
with:
command: check
args: -p sqlx-example-postgres-axum-social
run: cargo check -p sqlx-example-postgres-axum-social
- name: Axum Social with Tests (Test)
uses: actions-rs/cargo@v1
env:
DATABASE_URL: postgres://postgres:password@localhost:5432/axum-social
with:
command: test
args: -p sqlx-example-postgres-axum-social
run: cargo test -p sqlx-example-postgres-axum-social
# The Chat example has an interactive TUI which is not trivial to test automatically,
# so we only check that it compiles.
- name: Chat (Check)
uses: actions-rs/cargo@v1
with:
command: check
args: -p sqlx-example-postgres-chat
run: cargo check -p sqlx-example-postgres-chat
- name: Files (Setup)
working-directory: examples/postgres/files
@ -170,12 +138,9 @@ jobs:
run: sqlx db setup
- name: Files (Run)
uses: actions-rs/cargo@v1
env:
DATABASE_URL: postgres://postgres:password@localhost:5432/files
with:
command: run
args: -p sqlx-example-postgres-files
run: cargo run -p sqlx-example-postgres-files
- name: JSON (Setup)
working-directory: examples/postgres/json
@ -184,12 +149,9 @@ jobs:
run: sqlx db setup
- name: JSON (Run)
uses: actions-rs/cargo@v1
env:
DATABASE_URL: postgres://postgres:password@localhost:5432/json
with:
command: run
args: -p sqlx-example-postgres-json
run: cargo run -p sqlx-example-postgres-json
- name: Listen (Setup)
working-directory: examples/postgres/listen
@ -198,12 +160,9 @@ jobs:
run: sqlx db create
- name: Listen (Run)
uses: actions-rs/cargo@v1
env:
DATABASE_URL: postgres://postgres:password@localhost:5432/listen
with:
command: run
args: -p sqlx-example-postgres-listen
run: cargo run -p sqlx-example-postgres-listen
- name: Mockable TODOs (Setup)
working-directory: examples/postgres/mockable-todos
@ -212,13 +171,9 @@ jobs:
run: sqlx db setup
- name: Mockable TODOs (Run)
uses: actions-rs/cargo@v1
env:
DATABASE_URL: postgres://postgres:password@localhost:5432/mockable-todos
with:
# TODO: test full CLI
command: run
args: -p sqlx-example-postgres-mockable-todos
run: cargo run -p sqlx-example-postgres-mockable-todos
- name: TODOs (Setup)
working-directory: examples/postgres/todos
@ -227,13 +182,10 @@ jobs:
run: sqlx db setup
- name: TODOs (Run)
uses: actions-rs/cargo@v1
env:
DATABASE_URL: postgres://postgres:password@localhost:5432/todos
with:
# TODO: test full CLI
command: run
args: -p sqlx-example-postgres-todos
# TODO: test full CLI
run: cargo run -p sqlx-example-postgres-todos
- name: Transaction (Setup)
working-directory: examples/postgres/transaction
@ -242,12 +194,9 @@ jobs:
run: sqlx db setup
- name: Transaction (Run)
uses: actions-rs/cargo@v1
env:
DATABASE_URL: postgres://postgres:password@localhost:5432/txn
with:
command: run
args: -p sqlx-example-postgres-transaction
run: cargo run -p sqlx-example-postgres-transaction
sqlite:
name: SQLite Examples
@ -256,7 +205,7 @@ jobs:
steps:
- name: Get SQLx-CLI
uses: actions/download-artifact@v3
uses: actions/download-artifact@v4
with:
name: sqlx-cli
path: /home/runner/.local/bin
@ -266,15 +215,9 @@ jobs:
chmod +x /home/runner/.local/bin/sqlx
echo /home/runner/.local/bin >> $GITHUB_PATH
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v1
- uses: Swatinem/rust-cache@v2
with:
key: sqlite-examples
@ -284,9 +227,6 @@ jobs:
run: sqlx db setup --source=examples/sqlite/todos/migrations
- name: TODOs (Run)
uses: actions-rs/cargo@v1
env:
DATABASE_URL: sqlite://todos.sqlite
with:
command: run
args: -p sqlx-example-sqlite-todos
run: cargo run -p sqlx-example-sqlite-todos

100
.github/workflows/sqlx-cli.yml vendored Normal file
View file

@ -0,0 +1,100 @@
# CI workflow dedicated to sqlx-cli: clippy lint, cross-platform unit tests,
# and per-target builds of the `cargo-sqlx` binary.
name: SQLx CLI
on:
pull_request:
push:
branches:
- main
- "*-dev"
jobs:
# Lint with stable clippy (errors fail the build) and beta clippy (warnings only).
check:
name: Check
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- run: |
rustup update
rustup component add clippy
rustup toolchain install beta
rustup component add --toolchain beta clippy
- uses: Swatinem/rust-cache@v2
- run: cargo clippy --manifest-path sqlx-cli/Cargo.toml -- -D warnings
# Run beta for new warnings but don't break the build.
# Use a subdirectory of `target` to avoid clobbering the cache.
- run: >
cargo +beta clippy
--manifest-path sqlx-cli/Cargo.toml
--target-dir target/beta/
# Run sqlx-cli's unit tests on each supported OS.
test:
name: Test
runs-on: ${{ matrix.os }}
strategy:
matrix:
# Note: macOS-latest uses M1 Silicon (ARM64)
os:
- ubuntu-latest
# FIXME: migrations tests fail on Windows for whatever reason
# - windows-latest
- macOS-13
- macOS-latest
steps:
- uses: actions/checkout@v4
- uses: Swatinem/rust-cache@v2
with:
key: ${{ runner.os }}-test
- run: cargo test --manifest-path sqlx-cli/Cargo.toml
# Build the `cargo-sqlx` binary for each target and upload it as an artifact.
build:
name: Build
runs-on: ${{ matrix.os }}
strategy:
matrix:
# Note: macOS-latest uses M1 Silicon (ARM64)
os:
- ubuntu-latest
- windows-latest
- macOS-13
- macOS-latest
include:
- os: ubuntu-latest
target: x86_64-unknown-linux-musl
args: --features openssl-vendored
bin: target/debug/cargo-sqlx
- os: windows-latest
target: x86_64-pc-windows-msvc
bin: target/debug/cargo-sqlx.exe
- os: macOS-13
target: x86_64-apple-darwin
bin: target/debug/cargo-sqlx
- os: macOS-latest
target: aarch64-apple-darwin
bin: target/debug/cargo-sqlx
steps:
- uses: actions/checkout@v4
- name: Use latest Rust
run: rustup override set stable
- uses: Swatinem/rust-cache@v2
with:
key: ${{ runner.os }}-cli
- run: cargo build --manifest-path sqlx-cli/Cargo.toml --bin cargo-sqlx ${{ matrix.args }}
- uses: actions/upload-artifact@v4
with:
name: cargo-sqlx-${{ matrix.target }}
path: ${{ matrix.bin }}

View file

@ -79,52 +79,6 @@ jobs:
--manifest-path sqlx-core/Cargo.toml
--features json,_rt-${{ matrix.runtime }},_tls-${{ matrix.tls }}
cli-test:
name: CLI Unit Test
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: Swatinem/rust-cache@v2
with:
key: ${{ runner.os }}-test
- run: cargo test --manifest-path sqlx-cli/Cargo.toml
cli:
name: CLI Binaries
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, windows-latest] #, macOS-latest]
include:
- os: ubuntu-latest
target: x86_64-unknown-linux-musl
args: --features openssl-vendored
bin: target/debug/cargo-sqlx
- os: windows-latest
target: x86_64-pc-windows-msvc
bin: target/debug/cargo-sqlx.exe
# FIXME: macOS build fails because of missing pin-project-internal
# - os: macOS-latest
# target: x86_64-apple-darwin
# bin: target/debug/cargo-sqlx
steps:
- uses: actions/checkout@v4
- uses: Swatinem/rust-cache@v2
with:
key: ${{ runner.os }}-cli
- run: cargo build --manifest-path sqlx-cli/Cargo.toml --bin cargo-sqlx ${{ matrix.args }}
- uses: actions/upload-artifact@v2
with:
name: cargo-sqlx-${{ matrix.target }}
path: ${{ matrix.bin }}
sqlite:
name: SQLite
runs-on: ubuntu-22.04

View file

@ -355,3 +355,9 @@ required-features = ["postgres", "macros", "migrate"]
name = "postgres-migrate"
path = "tests/postgres/migrate.rs"
required-features = ["postgres", "macros", "migrate"]
[[test]]
name = "postgres-query-builder"
path = "tests/postgres/query_builder.rs"
required-features = ["postgres"]

View file

@ -2,5 +2,5 @@
# We reserve the right to increase this version at any time without considering it to be a breaking change.
# See the answer in FAQ.md for details.
[toolchain]
channel = "1.78"
channel = "1.80"
profile = "minimal"

View file

@ -1,6 +1,13 @@
// Annoying how deprecation warnings trigger in the same module as the deprecated item.
#![allow(deprecated)]
// Cargo features are broken in this file.
// `AnyKind` may return at some point but it won't be a simple enum.
#![allow(unexpected_cfgs)]
use crate::error::Error;
use std::str::FromStr;
#[deprecated = "not used or returned by any API"]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum AnyKind {
#[cfg(feature = "postgres")]

View file

@ -33,6 +33,7 @@ pub use connection::AnyConnection;
use crate::encode::Encode;
pub use connection::AnyConnectionBackend;
pub use database::Any;
#[allow(deprecated)]
pub use kind::AnyKind;
pub use options::AnyConnectOptions;
pub use query_result::AnyQueryResult;

View file

@ -4,7 +4,7 @@ use std::{cmp, io};
use crate::error::Error;
use crate::io::{Decode, Encode};
use crate::io::{AsyncRead, AsyncReadExt, Decode, Encode};
// Tokio, async-std, and std all use this as the default capacity for their buffered I/O.
const DEFAULT_BUF_SIZE: usize = 8192;
@ -168,6 +168,25 @@ impl WriteBuffer {
self.sanity_check();
}
/// Read into the buffer from `source`, returning the number of bytes read.
///
/// The buffer is automatically advanced by the number of bytes read.
pub async fn read_from(&mut self, mut source: impl AsyncRead + Unpin) -> io::Result<usize> {
// `match ()` with mutually-exclusive cfg'd arms selects exactly one branch
// at compile time, depending on the active async runtime feature.
let read = match () {
// Tokio lets us read into the buffer without zeroing first
#[cfg(feature = "_rt-tokio")]
_ => source.read_buf(self.buf_mut()).await?,
// Other runtimes: read into the remaining capacity of the buffer.
// NOTE(review): `init_remaining_mut` presumably initializes that
// capacity before handing out the slice — confirm at its definition.
#[cfg(not(feature = "_rt-tokio"))]
_ => source.read(self.init_remaining_mut()).await?,
};
// Only advance on a non-zero read; 0 means EOF (or an empty buffer).
if read > 0 {
self.advance(read);
}
Ok(read)
}
pub fn is_empty(&self) -> bool {
self.bytes_flushed >= self.bytes_written
}

View file

@ -54,24 +54,6 @@
//! [`Pool::acquire`] or
//! [`Pool::begin`].
use self::inner::PoolInner;
#[cfg(all(
any(
feature = "postgres",
feature = "mysql",
feature = "mssql",
feature = "sqlite"
),
feature = "any"
))]
use crate::any::{Any, AnyKind};
use crate::connection::Connection;
use crate::database::Database;
use crate::error::Error;
use crate::transaction::Transaction;
use event_listener::EventListener;
use futures_core::FusedFuture;
use futures_util::FutureExt;
use std::fmt;
use std::future::Future;
use std::pin::Pin;
@ -79,6 +61,21 @@ use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use event_listener::EventListener;
use futures_core::FusedFuture;
use futures_util::FutureExt;
use crate::connection::Connection;
use crate::database::Database;
use crate::error::Error;
use crate::transaction::Transaction;
pub use self::connection::PoolConnection;
use self::inner::PoolInner;
#[doc(hidden)]
pub use self::maybe::MaybePoolConnection;
pub use self::options::{PoolConnectionMetadata, PoolOptions};
#[macro_use]
mod executor;
@ -89,12 +86,6 @@ mod connection;
mod inner;
mod options;
pub use self::connection::PoolConnection;
pub use self::options::{PoolConnectionMetadata, PoolOptions};
#[doc(hidden)]
pub use self::maybe::MaybePoolConnection;
/// An asynchronous pool of SQLx database connections.
///
/// Create a pool with [Pool::connect] or [Pool::connect_with] and then call [Pool::acquire]
@ -434,8 +425,7 @@ impl<DB: Database> Pool<DB> {
///
/// Do something when the pool is closed:
/// ```rust,no_run
/// # #[cfg(feature = "postgres")]
/// # async fn bleh() -> sqlx_core::error::Result<()> {
/// # async fn bleh() -> sqlx::Result<()> {
/// use sqlx::PgPool;
///
/// let pool = PgPool::connect("postgresql://...").await?;
@ -463,8 +453,7 @@ impl<DB: Database> Pool<DB> {
///
/// Cancel a long-running operation:
/// ```rust,no_run
/// # #[cfg(feature = "postgres")]
/// # async fn bleh() -> sqlx_core::error::Result<()> {
/// # async fn bleh() -> sqlx::Result<()> {
/// use sqlx::{Executor, PgPool};
///
/// let pool = PgPool::connect("postgresql://...").await?;
@ -472,16 +461,26 @@ impl<DB: Database> Pool<DB> {
/// let pool2 = pool.clone();
///
/// tokio::spawn(async move {
/// pool2.close_event().do_until(async {
/// // `do_until` yields the inner future's output wrapped in `sqlx::Result`,
/// // in this case giving a double-wrapped result.
/// let res: sqlx::Result<sqlx::Result<()>> = pool2.close_event().do_until(async {
/// // This statement normally won't return for 30 days!
/// // (Assuming the connection doesn't time out first, of course.)
/// pool2.execute("SELECT pg_sleep('30 days')").await;
/// pool2.execute("SELECT pg_sleep('30 days')").await?;
///
/// // If the pool is closed before the statement completes, this won't be printed.
/// // This is because `.do_until()` cancels the future it's given if the
/// // pool is closed first.
/// println!("Waited!");
///
/// Ok(())
/// }).await;
///
/// match res {
/// Ok(Ok(())) => println!("Wait succeeded"),
/// Ok(Err(e)) => println!("Error from inside do_until: {e:?}"),
/// Err(e) => println!("Error from do_until: {e:?}"),
/// }
/// });
///
/// // This normally wouldn't return until the above statement completed and the connection
@ -534,28 +533,6 @@ impl<DB: Database> Pool<DB> {
}
}
#[cfg(all(
any(
feature = "postgres",
feature = "mysql",
feature = "mssql",
feature = "sqlite"
),
feature = "any"
))]
impl Pool<Any> {
/// Returns the database driver currently in-use by this `Pool`.
///
/// Determined by the connection URL.
pub fn any_kind(&self) -> AnyKind {
self.0
.connect_options
.read()
.expect("write-lock holder panicked")
.kind()
}
}
/// Returns a new [Pool] tied to the same shared connection pool.
impl<DB: Database> Clone for Pool<DB> {
fn clone(&self) -> Self {

View file

@ -593,118 +593,3 @@ where
self
}
}
#[cfg(all(test, feature = "postgres"))]
mod test {
use crate::postgres::Postgres;
use super::*;
#[test]
fn test_new() {
let qb: QueryBuilder<'_, Postgres> = QueryBuilder::new("SELECT * FROM users");
assert_eq!(qb.query, "SELECT * FROM users");
}
#[test]
fn test_push() {
let mut qb: QueryBuilder<'_, Postgres> = QueryBuilder::new("SELECT * FROM users");
let second_line = " WHERE last_name LIKE '[A-N]%;";
qb.push(second_line);
assert_eq!(
qb.query,
"SELECT * FROM users WHERE last_name LIKE '[A-N]%;".to_string(),
);
}
#[test]
#[should_panic]
fn test_push_panics_when_no_arguments() {
let mut qb: QueryBuilder<'_, Postgres> = QueryBuilder::new("SELECT * FROM users;");
qb.arguments = None;
qb.push("SELECT * FROM users;");
}
#[test]
fn test_push_bind() {
let mut qb: QueryBuilder<'_, Postgres> =
QueryBuilder::new("SELECT * FROM users WHERE id = ");
qb.push_bind(42i32)
.push(" OR membership_level = ")
.push_bind(3i32);
assert_eq!(
qb.query,
"SELECT * FROM users WHERE id = $1 OR membership_level = $2"
);
}
#[test]
fn test_build() {
let mut qb: QueryBuilder<'_, Postgres> = QueryBuilder::new("SELECT * FROM users");
qb.push(" WHERE id = ").push_bind(42i32);
let query = qb.build();
assert_eq!(
query.statement.unwrap_left(),
"SELECT * FROM users WHERE id = $1"
);
assert_eq!(query.persistent, true);
}
#[test]
fn test_reset() {
let mut qb: QueryBuilder<'_, Postgres> = QueryBuilder::new("");
let _query = qb
.push("SELECT * FROM users WHERE id = ")
.push_bind(42i32)
.build();
qb.reset();
assert_eq!(qb.query, "");
}
#[test]
fn test_query_builder_reuse() {
let mut qb: QueryBuilder<'_, Postgres> = QueryBuilder::new("");
let _query = qb
.push("SELECT * FROM users WHERE id = ")
.push_bind(42i32)
.build();
qb.reset();
let query = qb.push("SELECT * FROM users WHERE id = 99").build();
assert_eq!(
query.statement.unwrap_left(),
"SELECT * FROM users WHERE id = 99"
);
}
#[test]
fn test_query_builder_with_args() {
let mut qb: QueryBuilder<'_, Postgres> = QueryBuilder::new("");
let query = qb
.push("SELECT * FROM users WHERE id = ")
.push_bind(42i32)
.build();
let mut qb: QueryBuilder<'_, Postgres> =
QueryBuilder::new_with(query.sql(), query.take_arguments());
let query = qb.push("OR membership_level =").push_bind(3i32).build();
assert_eq!(
query.sql(),
"SELECT * FROM users WHERE id = $1 OR membership_level = $2"
);
}
}

View file

@ -196,13 +196,14 @@ fn foreign_key_depth(
}
#[test]
#[cfg(feature = "postgres")]
#[cfg(feature = "any")]
fn test_additive_fixture() -> Result<()> {
use crate::postgres::Postgres;
// Just need something that implements `Database`
use crate::any::Any;
let mut snapshot = FixtureSnapshot {
tables: BTreeMap::new(),
db: PhantomData::<Postgres>,
db: PhantomData::<Any>,
};
snapshot.tables.insert(

View file

@ -62,3 +62,8 @@ syn = { version = "2.0.52", default-features = false, features = ["full", "deriv
tempfile = { version = "3.10.1" }
quote = { version = "1.0.26", default-features = false }
url = { version = "2.2.2", default-features = false }
[lints.rust.unexpected_cfgs]
level = "warn"
# 1.80 will warn without this
check-cfg = ['cfg(sqlx_macros_unstable)', 'cfg(procmacro2_semver_exempt)']

View file

@ -1,5 +1,5 @@
use proc_macro2::{Ident, Span, TokenStream};
use quote::quote;
use quote::quote_spanned;
use syn::{
punctuated::Punctuated, token::Comma, Attribute, DeriveInput, Field, LitStr, Meta, Token, Type,
Variant,
@ -36,7 +36,7 @@ pub struct TypeName {
impl TypeName {
pub fn get(&self) -> TokenStream {
let val = &self.val;
quote! { #val }
quote_spanned! { self.span => #val }
}
}

View file

@ -98,7 +98,7 @@ impl DynQueryData {
return Ok(cached);
}
#[cfg(procmacr2_semver_exempt)]
#[cfg(procmacro2_semver_exempt)]
{
let path = path.as_ref().canonicalize()?;
let path = path.to_str().ok_or_else(|| {

View file

@ -1,14 +1,15 @@
use futures_core::future::BoxFuture;
use std::borrow::Cow;
use std::ops::{Deref, DerefMut};
use futures_core::future::BoxFuture;
use futures_core::stream::BoxStream;
use sqlx_core::bytes::{BufMut, Bytes};
use crate::connection::PgConnection;
use crate::error::{Error, Result};
use crate::ext::async_stream::TryAsyncStream;
use crate::io::{AsyncRead, AsyncReadExt};
use crate::io::AsyncRead;
use crate::message::{
CommandComplete, CopyData, CopyDone, CopyFail, CopyResponse, MessageFormat, Query,
};
@ -45,7 +46,7 @@ impl PgConnection {
///
/// 1. by closing the connection, or:
/// 2. by using another connection to kill the server process that is sending the data as shown
/// [in this StackOverflow answer](https://stackoverflow.com/a/35319598).
/// [in this StackOverflow answer](https://stackoverflow.com/a/35319598).
///
/// If you don't read the stream to completion, the next time the connection is used it will
/// need to read and discard all the remaining queued data, which could take some time.
@ -98,7 +99,7 @@ pub trait PgPoolCopyExt {
///
/// 1. by closing the connection, or:
/// 2. by using another connection to kill the server process that is sending the data as shown
/// [in this StackOverflow answer](https://stackoverflow.com/a/35319598).
/// [in this StackOverflow answer](https://stackoverflow.com/a/35319598).
///
/// If you don't read the stream to completion, the next time the connection is used it will
/// need to read and discard all the remaining queued data, which could take some time.
@ -220,20 +221,12 @@ impl<C: DerefMut<Target = PgConnection>> PgCopyIn<C> {
// we get canceled or read 0 bytes, but that should be fine.
buf.put_slice(b"d\0\0\0\x04");
let read = match () {
// Tokio lets us read into the buffer without zeroing first
#[cfg(feature = "_rt-tokio")]
_ => source.read_buf(buf.buf_mut()).await?,
#[cfg(not(feature = "_rt-tokio"))]
_ => source.read(buf.init_remaining_mut()).await?,
};
let read = buf.read_from(&mut source).await?;
if read == 0 {
break;
}
buf.advance(read);
// Write the length
let read32 = u32::try_from(read)
.map_err(|_| err_protocol!("number of bytes read exceeds 2^32: {}", read))?;

View file

@ -1,5 +1,3 @@
use std::i16;
use crate::io::PgBufMutExt;
use crate::io::{BufMutExt, Encode};
use crate::types::Oid;

View file

@ -1,9 +1,9 @@
#![allow(clippy::rc_buffer)]
use std::cmp;
use std::os::raw::c_char;
use std::ptr::{null, null_mut, NonNull};
use std::sync::Arc;
use std::{cmp, i32};
use libsqlite3_sys::{
sqlite3, sqlite3_prepare_v3, sqlite3_stmt, SQLITE_OK, SQLITE_PREPARE_PERSISTENT,
@ -56,7 +56,7 @@ impl VirtualStatement {
pub(crate) fn new(mut query: &str, persistent: bool) -> Result<Self, Error> {
query = query.trim();
if query.len() > i32::max_value() as usize {
if query.len() > i32::MAX as usize {
return Err(err_protocol!(
"query string must be smaller than {} bytes",
i32::MAX

View file

@ -124,11 +124,11 @@
//! [Datatypes in SQLite: Type Affinity][type-affinity] (accessed 2023/11/20):
//!
//! > A column with NUMERIC affinity may contain values using all five storage classes.
//! When text data is inserted into a NUMERIC column, the storage class of the text is converted to
//! INTEGER or REAL (in order of preference) if the text is a well-formed integer or real literal,
//! respectively. If the TEXT value is a well-formed integer literal that is too large to fit in a
//! 64-bit signed integer, it is converted to REAL. For conversions between TEXT and REAL storage
//! classes, only the first 15 significant decimal digits of the number are preserved.
//! > When text data is inserted into a NUMERIC column, the storage class of the text is converted to
//! > INTEGER or REAL (in order of preference) if the text is a well-formed integer or real literal,
//! > respectively. If the TEXT value is a well-formed integer literal that is too large to fit in a
//! > 64-bit signed integer, it is converted to REAL. For conversions between TEXT and REAL storage
//! > classes, only the first 15 significant decimal digits of the number are preserved.
//!
//! With the SQLite3 interactive CLI, we can see that a higher-precision value
//! (20 digits in this case) is rounded off:

View file

@ -6,10 +6,13 @@ use std::sync::Once;
pub use sqlx_core::any::driver::install_drivers;
pub use sqlx_core::any::{
Any, AnyArguments, AnyConnectOptions, AnyExecutor, AnyKind, AnyPoolOptions, AnyQueryResult,
AnyRow, AnyStatement, AnyTransactionManager, AnyTypeInfo, AnyValue, AnyValueRef,
Any, AnyArguments, AnyConnectOptions, AnyExecutor, AnyPoolOptions, AnyQueryResult, AnyRow,
AnyStatement, AnyTransactionManager, AnyTypeInfo, AnyValue, AnyValueRef,
};
#[allow(deprecated)]
pub use sqlx_core::any::AnyKind;
pub(crate) mod reexports {
/// **SEE DOCUMENTATION BEFORE USE**. Type alias for `Pool<Any>`.
#[doc = include_str!("install_drivers_note.md")]

View file

@ -52,19 +52,19 @@
/// † Only callable if the query returns no columns; otherwise it's assumed the query *may* return at least one row.
/// ## Requirements
/// * The `DATABASE_URL` environment variable must be set at build-time to point to a database
/// server with the schema that the query string will be checked against. All variants of `query!()`
/// use [dotenv]<sup>1</sup> so this can be in a `.env` file instead.
/// server with the schema that the query string will be checked against.
/// All variants of `query!()` use [dotenv]<sup>1</sup> so this can be in a `.env` file instead.
///
/// * Or, `.sqlx` must exist at the workspace root. See [Offline Mode](#offline-mode-requires-the-offline-feature)
/// below.
///
/// * The query must be a string literal, or concatenation of string literals using `+` (useful
/// for queries generated by macro), or else it cannot be introspected (and thus cannot be dynamic
/// or the result of another macro).
/// for queries generated by macro), or else it cannot be introspected (and thus cannot be dynamic
/// or the result of another macro).
///
/// * The `QueryAs` instance will be bound to the same database type as `query!()` was compiled
/// against (e.g. you cannot build against a Postgres database and then run the query against
/// a MySQL database).
/// against (e.g. you cannot build against a Postgres database and then run the query against
/// a MySQL database).
///
/// * The schema of the database URL (e.g. `postgres://` or `mysql://`) will be used to
/// determine the database type.
@ -426,7 +426,7 @@ macro_rules! query_file_unchecked (
/// * The query must output at least one column.
/// * The column names of the query must match the field names of the struct.
/// * The field types must be the Rust equivalent of their SQL counterparts; see the corresponding
/// module for your database for mappings:
/// module for your database for mappings:
/// * Postgres: [crate::postgres::types]
/// * MySQL: [crate::mysql::types]
/// * Note: due to wire protocol limitations, the query macros do not know when

View file

@ -85,18 +85,6 @@ async fn test_pool_callbacks() -> anyhow::Result<()> {
let conn_options: AnyConnectOptions = std::env::var("DATABASE_URL")?.parse()?;
#[cfg(feature = "mssql")]
if conn_options.kind() == sqlx::any::AnyKind::Mssql {
// MSSQL doesn't support `CREATE TEMPORARY TABLE`,
// because why follow conventions when you can subvert them?
// Instead, you prepend `#` to the table name for a session-local temporary table
// which you also have to do when referencing it.
// Since that affects basically every query here,
// it's just easier to have a separate MSSQL-specific test case.
return Ok(());
}
let current_id = AtomicI32::new(0);
let pool = AnyPoolOptions::new()

View file

@ -0,0 +1,105 @@
use sqlx::postgres::Postgres;
use sqlx::query_builder::QueryBuilder;
use sqlx::Execute;
// A freshly-constructed builder reports exactly the SQL it was seeded with.
#[test]
fn test_new() {
let qb: QueryBuilder<'_, Postgres> = QueryBuilder::new("SELECT * FROM users");
assert_eq!(qb.sql(), "SELECT * FROM users");
}
// `push` appends raw SQL verbatim to the query accumulated so far.
#[test]
fn test_push() {
let mut qb: QueryBuilder<'_, Postgres> = QueryBuilder::new("SELECT * FROM users");
let second_line = " WHERE last_name LIKE '[A-N]%';";
qb.push(second_line);
assert_eq!(
qb.sql(),
"SELECT * FROM users WHERE last_name LIKE '[A-N]%';".to_string(),
);
}
// Pushing more SQL after `build()` without calling `reset()` first must panic.
// NOTE(review): presumably `build()` consumes the builder's argument buffer,
// leaving it in an unusable state until reset — confirm against `QueryBuilder`.
#[test]
#[should_panic]
fn test_push_panics_after_build_without_reset() {
let mut qb: QueryBuilder<'_, Postgres> = QueryBuilder::new("SELECT * FROM users;");
let _query = qb.build();
qb.push("SELECT * FROM users;");
}
// `push_bind` records the value as a bind argument and inserts the next
// positional placeholder ($1, $2, ...) into the SQL text.
#[test]
fn test_push_bind() {
let mut qb: QueryBuilder<'_, Postgres> = QueryBuilder::new("SELECT * FROM users WHERE id = ");
qb.push_bind(42i32)
.push(" OR membership_level = ")
.push_bind(3i32);
assert_eq!(
qb.sql(),
"SELECT * FROM users WHERE id = $1 OR membership_level = $2"
);
}
// `build()` yields an executable query whose SQL matches the accumulated text;
// queries produced by the builder default to persistent (prepared) execution.
#[test]
fn test_build() {
let mut qb: QueryBuilder<'_, Postgres> = QueryBuilder::new("SELECT * FROM users");
qb.push(" WHERE id = ").push_bind(42i32);
let query = qb.build();
assert_eq!(query.sql(), "SELECT * FROM users WHERE id = $1");
// Fully-qualified call disambiguates the `Execute` trait method.
assert_eq!(Execute::persistent(&query), true);
}
// `reset()` clears the accumulated SQL so the builder can be reused.
#[test]
fn test_reset() {
let mut qb: QueryBuilder<'_, Postgres> = QueryBuilder::new("");
let _query = qb
.push("SELECT * FROM users WHERE id = ")
.push_bind(42i32)
.build();
qb.reset();
assert_eq!(qb.sql(), "");
}
// After build() + reset(), the same builder can assemble a brand-new query;
// no state from the first query leaks into the second.
#[test]
fn test_query_builder_reuse() {
let mut qb: QueryBuilder<'_, Postgres> = QueryBuilder::new("");
let _query = qb
.push("SELECT * FROM users WHERE id = ")
.push_bind(42i32)
.build();
qb.reset();
let query = qb.push("SELECT * FROM users WHERE id = 99").build();
assert_eq!(query.sql(), "SELECT * FROM users WHERE id = 99");
}
// `with_arguments` seeds a new builder from an existing query's SQL plus its
// already-bound arguments, so placeholder numbering continues from $1.
#[test]
fn test_query_builder_with_args() {
let mut qb: QueryBuilder<'_, Postgres> = QueryBuilder::new("");
let mut query = qb
.push("SELECT * FROM users WHERE id = ")
.push_bind(42i32)
.build();
// NOTE(review): the double unwrap suggests `take_arguments()` returns a
// nested Result/Option wrapper here — confirm against its signature.
let mut qb: QueryBuilder<'_, Postgres> =
QueryBuilder::with_arguments(query.sql(), query.take_arguments().unwrap().unwrap());
let query = qb.push(" OR membership_level = ").push_bind(3i32).build();
assert_eq!(
query.sql(),
"SELECT * FROM users WHERE id = $1 OR membership_level = $2"
);
}