Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 0 additions & 18 deletions datafusion/core/tests/sql/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,6 @@ pub mod parquet;
pub mod parquet_schema;
pub mod partitioned_csv;
pub mod predicates;
pub mod projection;
pub mod references;
pub mod repartition;
pub mod select;
Expand Down Expand Up @@ -456,23 +455,6 @@ async fn register_aggregate_csv_by_sql(ctx: &SessionContext) {
);
}

/// Register the `aggregate_simple` CSV fixture with the given session context.
///
/// A dedicated fixture is needed because `aggregate_test_100` does not contain
/// enough repeated float values to exercise grouping on floating-point columns.
async fn register_aggregate_simple_csv(ctx: &SessionContext) -> Result<()> {
    // Three typed columns: two float columns for grouping plus a boolean.
    let fields = vec![
        Field::new("c1", DataType::Float32, false),
        Field::new("c2", DataType::Float64, false),
        Field::new("c3", DataType::Boolean, false),
    ];
    let schema = Arc::new(Schema::new(fields));

    let options = CsvReadOptions::new().schema(&schema);
    ctx.register_csv("aggregate_simple", "tests/data/aggregate_simple.csv", options)
        .await?;

    Ok(())
}

async fn register_aggregate_csv(ctx: &SessionContext) -> Result<()> {
let testdata = datafusion::test_util::arrow_test_data();
let schema = test_util::aggr_test_schema();
Expand Down
20 changes: 1 addition & 19 deletions datafusion/core/tests/sql/partitioned_csv.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,31 +19,13 @@

use std::{io::Write, sync::Arc};

use arrow::{
datatypes::{DataType, Field, Schema, SchemaRef},
record_batch::RecordBatch,
};
use arrow::datatypes::{DataType, Field, Schema, SchemaRef};
use datafusion::{
error::Result,
prelude::{CsvReadOptions, SessionConfig, SessionContext},
};
use tempfile::TempDir;

/// Plan the given SQL statement on `ctx` and collect all result batches.
async fn plan_and_collect(
    ctx: &mut SessionContext,
    sql: &str,
) -> Result<Vec<RecordBatch>> {
    let dataframe = ctx.sql(sql).await?;
    dataframe.collect().await
}

/// Build a session over `partition_count` CSV partitions in a fresh temporary
/// directory, execute `sql` against it, and return the collected batches.
pub async fn execute(sql: &str, partition_count: usize) -> Result<Vec<RecordBatch>> {
    // TempDir is dropped (and the directory removed) when this function returns.
    let dir = TempDir::new()?;
    let mut session = create_ctx(&dir, partition_count).await?;
    plan_and_collect(&mut session, sql).await
}

/// Generate CSV partitions within the supplied directory
fn populate_csv_partitions(
tmp_dir: &TempDir,
Expand Down
Loading