1. 整库导出为一个SQL文件
bash
set -euo pipefail  # abort the dump if any beeline call fails, instead of writing a truncated file

database="<your-database-name>"

# Start the dump with statements that recreate the database from scratch.
cat << EOF > "$database.sql"
drop database if exists $database cascade;
create database if not exists $database;
use $database;
EOF

# Append the DDL of every table. Read the table list line-by-line via
# process substitution instead of `for t in $(...)` so names are never
# word-split or glob-expanded. beeline's "show create table" output does
# not always end with a semicolon, so add one after each statement.
while IFS= read -r table; do
  [[ -n "$table" ]] || continue
  beeline -n hadoop -u jdbc:hive2:// --showHeader=false --outputformat=tsv2 \
    -e "show create table $database.$table;" >> "$database.sql"
  echo ";" >> "$database.sql"
done < <(beeline -n hadoop -u jdbc:hive2:// --showHeader=false --outputformat=tsv2 \
  -e "use $database; show tables;")
2. 从SQL文件导入数据库
bash
# Replay the exported DDL file against the target cluster with Spark SQL.
spark-sql -f /path/to/your-exported-db.sql