@TheLurps
Last active April 1, 2025 09:36
DuckDB sampling from parquet and hive partitioned parquet files
import duckdb
import numpy as np
import pandas as pd
from tempfile import TemporaryDirectory, NamedTemporaryFile
conn = duckdb.connect(":memory:")
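# presumably run single-threaded so the REPEATABLE(42) reservoir samples below
# stay reproducible; DuckDB notes that sampling results can vary across runs
# when multiple threads are used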
conn.execute("SET threads = 1;")
# generate data
np.random.seed(42)
size = 1_000_000
df = pd.DataFrame(
    {
        "range": range(size),
        "bin": np.random.randint(0, 10, size=size),
    }
)
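# DuckDB can reference the in-scope pandas DataFrame by its variable name
# ("df") directly in SQL thanks to replacement scans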
print("sampling from df")
print(
    conn.sql("""
        SELECT range
        FROM df
        USING SAMPLE reservoir(10 ROWS)
        REPEATABLE(42);
    """).fetchall()
)
# write df to a single parquet file
parquet_file = NamedTemporaryFile(suffix=".parquet")
conn.sql(f"""
COPY (FROM df)
TO '{parquet_file.name}'
(FORMAT 'parquet', OVERWRITE);
""")
print("sampling from single parquet")
print(
    conn.sql(f"""
        SELECT range
        FROM '{parquet_file.name}'
        USING SAMPLE reservoir(10 ROWS)
        REPEATABLE(42);
    """).fetchall()
)
parquet_file.close()
# write hive-partitioned parquet files (one directory per 'bin' value)
hive_path = TemporaryDirectory()
conn.sql(f"""
COPY (FROM df)
TO '{hive_path.name}'
(FORMAT 'parquet', PARTITION_BY bin, OVERWRITE);
""")
print("sampling from hive partitioned parquet files")
print(
    conn.sql(f"""
        SELECT range
        FROM read_parquet('{hive_path.name}/*/*.parquet', hive_partitioning = true)
        USING SAMPLE reservoir(10 ROWS)
        REPEATABLE(42);
    """).fetchall()
)
hive_path.cleanup()
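
# --- Variation (a sketch, not part of the original gist) ---
# Besides reservoir(10 ROWS), DuckDB also accepts percentage-based sampling.
# Bernoulli sampling keeps each row independently with the given probability,
# so the returned row count is only approximate rather than exact; the seed
# goes into the sampling method's argument list.
print("bernoulli sampling 1% of df (approximate row count)")
print(
    conn.sql("""
        SELECT count(*) AS n_sampled
        FROM (
            SELECT range
            FROM df
            USING SAMPLE 1% (bernoulli, 42)
        );
    """).fetchall()
)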