Hive Small File Analysis

1. Fetch the fsimage file:

hdfs dfsadmin -fetchImage /data/xy/

2. Parse the binary fsimage into delimited text:

hdfs oiv -i /data/xy/fsimage_0000000019891608958 -t /data/xy/tmpdir -o /data/xy/out -p Delimited -delimiter ","

3. Create the Hive table:

create database if not exists hdfsinfo;
use hdfsinfo;

CREATE TABLE fsimage_info_csv (
  path               string,
  replication        int,
  modificationtime   string,
  accesstime         string,
  preferredblocksize bigint,
  blockscount        int,
  filesize           bigint,
  nsquota            string,
  dsquota            string,
  permission         string,
  username           string,
  groupname          string
)
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
WITH SERDEPROPERTIES ('field.delim' = ',', 'serialization.format' = ',')
STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat';
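As an alternative to copying the parsed file into the warehouse directory by hand (step 4 below), an external table can point straight at the directory holding the oiv output. A minimal sketch, assuming the file is kept under a hypothetical HDFS directory /data/fsimage_csv/:

CREATE EXTERNAL TABLE fsimage_info_csv_ext (
  path string, replication int, modificationtime string, accesstime string,
  preferredblocksize bigint, blockscount int, filesize bigint,
  nsquota string, dsquota string, permission string,
  username string, groupname string
)
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
WITH SERDEPROPERTIES ('field.delim' = ',', 'serialization.format' = ',')
STORED AS TEXTFILE
LOCATION '/data/fsimage_csv/';  -- hypothetical directory holding the oiv output

With the external variant, re-running steps 1-2 and overwriting the file refreshes the table with no further loading step.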

4. Load the parsed HDFS metadata into Hive:

hdfs dfs -put /data/xy/out /user/hive/warehouse/hdfsinfo.db/fsimage_info_csv/

hdfs dfs -ls /user/hive/warehouse/hdfsinfo.db/fsimage_info_csv/

Then, in Hive:

MSCK REPAIR TABLE hdfsinfo.fsimage_info_csv;  -- only needed for partitioned tables; a harmless no-op here
select * from hdfsinfo.fsimage_info_csv limit 5;
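As a quick sanity check on the load, the query below counts total entries and how many of them are directories; it relies on the Delimited processor emitting ls-style permission strings, where a leading 'd' marks a directory (the same convention the filter in step 6 uses):

SELECT
  COUNT(*) AS total_entries,
  SUM(CASE WHEN permission LIKE 'd%' THEN 1 ELSE 0 END) AS dir_entries
FROM hdfsinfo.fsimage_info_csv;

The total should match the line count of the oiv output file.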

5. Count small files per leaf directory (filesize < 4194304 bytes, i.e. < 4 MB):

SELECT
  dir_path,
  COUNT(*) AS small_file_num,
  modificationtime,
  accesstime
FROM (
  SELECT
    modificationtime,
    accesstime,
    relative_size,
    dir_path
  FROM (
    SELECT
      CASE WHEN filesize < 4194304 THEN 'small' ELSE 'large' END AS relative_size,
      modificationtime,
      accesstime,
      -- strip the last path component to get the parent directory
      split(
        substr(
          concat_ws('/', split(path, '/')),
          1,
          length(concat_ws('/', split(path, '/'))) - length(last_element) - 1
        ),
        ','
      )[0] AS dir_path
    FROM (
      SELECT
        modificationtime,
        accesstime,
        filesize,
        path,
        split(path, '/')[size(split(path, '/')) - 1] AS last_element
      FROM hdfsinfo.fsimage_info_csv
      WHERE permission NOT LIKE 'd%'  -- exclude directory entries, whose filesize of 0 would otherwise count as 'small'
    ) t0
  ) t1
  WHERE relative_size = 'small'
) t2
GROUP BY
  dir_path, modificationtime, accesstime
ORDER BY
  small_file_num DESC
LIMIT 500;
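Counting files shows where the small files are, but not how much data they hold. The sketch below sums bytes per directory under the same 4 MB threshold; it derives the parent directory with regexp_extract (equivalent in spirit to the split/substr logic above) rather than reproducing that expression:

SELECT
  regexp_extract(path, '^(.*)/[^/]+$', 1) AS dir_path,  -- everything before the last '/'
  COUNT(*)      AS small_file_num,
  SUM(filesize) AS small_file_bytes
FROM hdfsinfo.fsimage_info_csv
WHERE permission NOT LIKE 'd%'
  AND filesize < 4194304
GROUP BY regexp_extract(path, '^(.*)/[^/]+$', 1)
ORDER BY small_file_num DESC
LIMIT 500;

A directory with many small files but few total bytes is a strong candidate for consolidation into fewer, larger files.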

6. Count small files per leaf directory, excluding directory entries (filesize < 41943040 bytes, i.e. < 40 MB):

SELECT
  dir_path,
  COUNT(*) AS small_file_num
FROM (
  SELECT
    relative_size,
    dir_path
  FROM (
    SELECT
      CASE WHEN filesize < 41943040 THEN 'small' ELSE 'large' END AS relative_size,
      split(
        substr(
          concat_ws('/', split(path, '/')),
          1,
          length(concat_ws('/', split(path, '/'))) - length(last_element) - 1
        ),
        ','
      )[0] AS dir_path
    FROM (
      SELECT
        filesize,
        path,
        split(path, '/')[size(split(path, '/')) - 1] AS last_element
      FROM hdfsinfo.fsimage_info_csv
      WHERE permission NOT LIKE 'd%'
    ) t0
  ) t1
  WHERE relative_size = 'small'
) t2
GROUP BY
  dir_path
ORDER BY
  small_file_num DESC
LIMIT 50000;
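The same fsimage table can also show who owns the small files. A minimal sketch grouping by the username column, reusing the 40 MB threshold from above:

SELECT
  username,
  COUNT(*)      AS small_file_num,
  SUM(filesize) AS small_file_bytes
FROM hdfsinfo.fsimage_info_csv
WHERE permission NOT LIKE 'd%'
  AND filesize < 41943040
GROUP BY username
ORDER BY small_file_num DESC
LIMIT 50;

This per-user view is handy for following up with the teams whose jobs generate the most small files.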
