DuckDB Learning, Part 1

DuckDB is a fast, in-process analytical database. It supports a feature-rich SQL dialect complemented by deep integrations into client APIs.
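
A minimal quick-start sketch of what "in-process" means (the file name and queries are illustrative): the database runs inside the Python interpreter, with no server to start.

import duckdb

# query an in-memory database; nothing to start beyond the Python package
duckdb.sql("SELECT 42 AS answer").show()

# or work against a file-backed database that persists between sessions
con = duckdb.connect("quickstart.db")  # illustrative file name
con.sql("CREATE TABLE IF NOT EXISTS t AS SELECT 42 AS answer")
print(con.sql("SELECT * FROM t").df())
con.close()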

Using DuckDB in a notebook

Installation

pip install duckdb

Example code (Python):

#> pip install jupysql
#> pip install duckdb-engine
import duckdb 
import pandas as pd

# Configure jupysql to return results directly as Pandas DataFrames and simplify the output printed to the notebook


%config SqlMagic.autopandas = True
%config SqlMagic.feedback = False
%config SqlMagic.displaycon = False


# Connect jupysql to DuckDB using a SQLAlchemy-style connection string:
# a new in-memory DuckDB, the default connection, or a file-backed database
#%sql duckdb:///:memory:
#%sql duckdb:///:default:
#%sql duckdb:///path/to/file.db

# Connect to DuckDB using the native Python API
#con = duckdb.connect("file.db")

%load_ext sql 
con = duckdb.connect("file.db")
%sql con --alias duckdb

# Queries
# Query results are returned as Pandas DataFrames
# Single-line magic
%sql SELECT 'Off and flying!' AS a_duckdb_column;
# Cell (multi-line) magic
%%sql
SELECT
    schema_name,
    function_name
FROM duckdb_functions()
ORDER BY ALL DESC
LIMIT 5;

# Assign the query result to a variable
%sql res << SELECT 'Off and flying!' AS a_duckdb_column;

# Interoperating with Pandas DataFrames
input_df = pd.DataFrame({"i": [1, 2, 3], "j": ["one", "two", "three"]})  # example data; columns i and j are assumed by the queries below
%sql output_df << SELECT sum(i) AS total_i FROM input_df;

# Query the DataFrame directly through the native connection (DuckDB resolves the name via a replacement scan)
con.sql("SELECT * FROM input_df")
con.sql("CREATE TABLE tbl AS SELECT * FROM input_df")   # materialize the DataFrame as a DuckDB table
con.sql("INSERT INTO tbl SELECT * FROM input_df")       # append the DataFrame's rows to that table
# Export query results back to a Pandas DataFrame
temp_df = con.sql("SELECT * FROM input_df").df()

# DuckDB offers a relational API that can be used to chain together query operations.
# These are lazily evaluated so that DuckDB can optimize their execution. The operators
# can act on Pandas DataFrames, DuckDB tables, or views (which can point to any underlying
# storage format that DuckDB can read, such as CSV or Parquet files). Here we show a simple
# example of reading from a Pandas DataFrame and returning a DataFrame.

rel = con.from_df(input_df)
transformed_rel = rel.filter("i > 1").project("i, j, i * 2 AS k").order("i DESC")
transformed_rel.df()

Integration with Ibis, the portable Python dataframe library (ibis-project.org)
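
A minimal sketch of using Ibis on top of DuckDB (assumes the ibis-framework package with its DuckDB backend is installed; table and column names are illustrative):

import ibis

con = ibis.duckdb.connect()  # in-memory DuckDB backend
t = ibis.memtable({"i": [1, 2, 3], "j": ["one", "two", "three"]})

# build a lazy expression; DuckDB executes it only when asked
expr = t.filter(t.i > 1).select("i", "j")
print(con.execute(expr))  # returns a pandas DataFrame

Like the relational API above, Ibis builds the query lazily and pushes execution down to DuckDB.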

Importing and exporting data

Example statements (SQL):
-- CSV: import
SELECT * FROM read_csv('input.csv');
COPY tbl FROM 'input.csv';
-- CSV: export
COPY tbl TO 'output.csv' (HEADER, DELIMITER ',');
COPY (SELECT * FROM tbl) TO 'output.csv' (HEADER, DELIMITER ',');



-- Parquet
SELECT * FROM read_parquet('input.parquet');
COPY tbl FROM 'input.parquet' (FORMAT PARQUET);
COPY tbl TO 'output.parquet' (FORMAT PARQUET);
COPY (SELECT * FROM tbl) TO 'output.parquet' (FORMAT PARQUET);


-- JSON
SELECT * FROM read_json_auto('input.json');
COPY tbl FROM 'input.json';

COPY tbl TO 'output.json';
COPY (SELECT * FROM tbl) TO 'output.json';

-- Excel (via the spatial extension)
INSTALL spatial; 
LOAD spatial;
SELECT * FROM st_read('test_excel.xlsx', layer = 'Sheet1');

-- Importing a sheet with/without a header
-- The option HEADERS has three possible values:
--   * FORCE: treat the first row as a header
--   * DISABLE: treat the first row as a row of data
--   * AUTO: attempt auto-detection (default)

SELECT * FROM st_read('test_excel.xlsx', layer = 'Sheet1', open_options = ['HEADERS=FORCE']);

-- The option FIELD_TYPES defines how field types should be treated:
--   * STRING: load all fields as strings (VARCHAR type)
--   * AUTO: auto-detect field types (default)
-- For example, to treat the first row as a header and use auto-detection for types, run:

SELECT * FROM st_read(
    'test_excel.xlsx',
    layer = 'Sheet1',
    open_options = ['HEADERS=FORCE', 'FIELD_TYPES=AUTO']);
-- Export
COPY tbl TO 'output.xlsx' WITH (FORMAT GDAL, DRIVER 'xlsx');
COPY (SELECT * FROM tbl) TO 'output.xlsx' WITH (FORMAT GDAL, DRIVER 'xlsx');


-- Importing from other databases
INSTALL mysql;
LOAD mysql;
ATTACH 'host=localhost user=root port=0 database=mysqlscanner' AS mysql_db (TYPE mysql_scanner, READ_ONLY);
USE mysql_db;
-- The attached MySQL database can now be queried directly (omit READ_ONLY to allow writes as well)

INSTALL postgres;
LOAD postgres;
SELECT * FROM postgres_scan('host=localhost port=5432 dbname=mydb', 'public', 'mytable');

INSTALL sqlite;
LOAD sqlite;

SELECT * FROM sqlite_scan('test.db', 'tbl_name');
-- attach the SQLite file "test.db" 
ATTACH 'test.db' AS test (TYPE sqlite); 
-- the table "tbl_name" can now be queried as if it is a regular table 
SELECT * FROM test.tbl_name; 
-- switch the active database to "test" 
USE test; 
-- list all tables in the file
SHOW TABLES;

-- Reading files directly as text
SELECT size, parse_path(filename), content FROM read_text('test/sql/table_function/files/*.txt');

Querying basic database metadata

Example statements (SQL):
-- Inspect a table: DESCRIBE, SHOW
DESCRIBE tbname;
SHOW tbname;

-- Describe a query:
DESCRIBE SELECT * FROM tbl;

-- DESCRIBE can be used in a subquery, which makes it possible to create a table from a description:
CREATE TABLE tbl_description AS SELECT * FROM (DESCRIBE tbl);

-- Explain the query plan:
EXPLAIN SELECT * FROM tbl;
SET explain_output = 'all';

EXPLAIN
SELECT c_count, count(*) AS custdist
FROM (
    SELECT c_custkey, count(o_orderkey)
    FROM customer
    LEFT OUTER JOIN orders
        ON c_custkey = o_custkey
        AND o_comment NOT LIKE '%special%requests%'
    GROUP BY c_custkey
) AS c_orders (c_custkey, c_count)
GROUP BY c_count
ORDER BY custdist DESC, c_count DESC;

-- List tables:
SHOW TABLES;
SHOW ALL TABLES;

-- Summarize a table or a query:
SUMMARIZE tbl;
SUMMARIZE SELECT * FROM tbl;

-- Other useful commands:
SELECT version();
PRAGMA platform;
SELECT * FROM duckdb_extensions();

Meta table functions (a usage example follows the list):

  • duckdb_columns(): columns
  • duckdb_constraints(): constraints
  • duckdb_databases(): lists the databases that are accessible from within the current DuckDB process
  • duckdb_dependencies(): dependencies between objects
  • duckdb_extensions(): extensions
  • duckdb_functions(): functions
  • duckdb_indexes(): secondary indexes
  • duckdb_keywords(): DuckDB's keywords and reserved words
  • duckdb_optimizers(): the available optimization rules in the DuckDB instance
  • duckdb_schemas(): schemas
  • duckdb_sequences(): sequences
  • duckdb_settings(): settings
  • duckdb_tables(): base tables
  • duckdb_types(): data types
  • duckdb_views(): views
  • duckdb_temporary_files(): the temporary files DuckDB has written to disk, to offload data from memory
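
These are ordinary table functions, so any of them can be queried like a table. A minimal sketch in Python, matching the notebook examples above (the chosen functions and columns are just examples):

import duckdb

con = duckdb.connect()
# peek at a few settings and at the available extensions
print(con.sql("SELECT name, value FROM duckdb_settings() LIMIT 5").df())
print(con.sql("SELECT extension_name, loaded FROM duckdb_extensions() LIMIT 5").df())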