Hive Small File Analysis

1. Fetch the fsimage file:

hdfs dfsadmin -fetchImage /data/xy/
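hdfs dfsadmin -fetchImage downloads the most recent fsimage from the active NameNode into the target directory. A quick check that the download succeeded (the transaction-ID suffix in the file name will differ per cluster):

ls -lh /data/xy/
# expect a file named fsimage_<txid>, e.g. fsimage_0000000019891608958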

2. Parse the binary image with the offline image viewer (oiv):

hdfs oiv -i /data/xy/fsimage_0000000019891608958 -t /data/xy/tmpdir -o /data/xy/out -p Delimited -delimiter ","
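The -t scratch directory lets oiv process images too large to hold in memory. With the Delimited processor, the first line of /data/xy/out should be a header row naming the columns (comma-separated here because of the -delimiter flag); the Hive DDL in step 3 mirrors this layout:

Path,Replication,ModificationTime,AccessTime,PreferredBlockSize,BlocksCount,FileSize,NSQUOTA,DSQUOTA,Permission,UserName,GroupName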

3. Create the Hive table

create database if not exists hdfsinfo;
use hdfsinfo;

CREATE TABLE fsimage_info_csv (
  path               string,
  replication        int,
  modificationtime   string,
  accesstime         string,
  preferredblocksize bigint,
  blockscount        int,
  filesize           bigint,
  nsquota            string,
  dsquota            string,
  permission         string,
  username           string,
  groupname          string)
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
WITH SERDEPROPERTIES ('field.delim'=',', 'serialization.format'=',')
STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat';
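Because the oiv output begins with that header row, the header would otherwise be loaded as a data row. One way to skip it, assuming the standard skip.header.line.count table property is acceptable here:

ALTER TABLE fsimage_info_csv SET TBLPROPERTIES ('skip.header.line.count'='1');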

4. Load the parsed HDFS metadata into the Hive table

hdfs dfs -put /data/xy/out /user/hive/warehouse/hdfsinfo.db/fsimage_info_csv/

hdfs dfs -ls /user/hive/warehouse/hdfsinfo.db/fsimage_info_csv/

In Hive (optional here, since the table is not partitioned):

MSCK REPAIR TABLE hdfsinfo.fsimage_info_csv;

select * from hdfsinfo.fsimage_info_csv limit 5;
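As a sanity check, the row count should match the line count of the oiv output file (minus the header, if it was skipped):

SELECT COUNT(*) FROM hdfsinfo.fsimage_info_csv;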

5. Count small files (4194304 bytes, i.e. <4 MB) per leaf directory, with timestamps. Note that because the GROUP BY includes modificationtime and accesstime, each output row counts files sharing both a directory and identical timestamps; step 6 gives the plain per-directory count.

SELECT
  dir_path,
  COUNT(*) AS small_file_num,
  modificationtime,
  accesstime
FROM (
  SELECT modificationtime, accesstime, relative_size, dir_path
  FROM (
    SELECT
      (CASE filesize < 4194304 WHEN TRUE THEN 'small' ELSE 'large' END) AS relative_size,
      modificationtime,
      accesstime,
      -- strip the file name from the path to get the parent directory
      split(
        substr(
          concat_ws('/', split(PATH, '/')),
          1,
          length(concat_ws('/', split(PATH, '/'))) - length(last_element) - 1
        ),
        ',')[0] AS dir_path
    FROM (
      SELECT
        modificationtime,
        accesstime,
        filesize,
        PATH,
        -- last path component, i.e. the file name
        split(PATH, '/')[size(split(PATH, '/')) - 1] AS last_element
      FROM hdfsinfo.fsimage_info_csv
    ) t0
  ) t1
  WHERE relative_size = 'small'
) t2
GROUP BY dir_path, modificationtime, accesstime
ORDER BY small_file_num DESC
LIMIT 500;

6. Count small files (41943040 bytes, i.e. <40 MB) per leaf directory, excluding directory entries themselves (permission not LIKE 'd%')

SELECT
  dir_path,
  COUNT(*) AS small_file_num
FROM (
  SELECT relative_size, dir_path
  FROM (
    SELECT
      (CASE filesize < 41943040 WHEN TRUE THEN 'small' ELSE 'large' END) AS relative_size,
      -- strip the file name from the path to get the parent directory
      split(
        substr(
          concat_ws('/', split(PATH, '/')),
          1,
          length(concat_ws('/', split(PATH, '/'))) - length(last_element) - 1
        ),
        ',')[0] AS dir_path
    FROM (
      SELECT
        filesize,
        PATH,
        split(PATH, '/')[size(split(PATH, '/')) - 1] AS last_element
      FROM hdfsinfo.fsimage_info_csv
      WHERE permission NOT LIKE 'd%'   -- keep only files, not directories
    ) t0
  ) t1
  WHERE relative_size = 'small'
) t2
GROUP BY dir_path
ORDER BY small_file_num DESC
LIMIT 50000;
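For prioritizing cleanup, it can also help to see how many bytes the small files in each directory occupy. A minimal sketch along the same lines (small_file_bytes is a name introduced here; the sketch also simplifies concat_ws('/', split(PATH, '/')) back to PATH, which is equivalent):

SELECT
  dir_path,
  COUNT(*)      AS small_file_num,
  SUM(filesize) AS small_file_bytes
FROM (
  SELECT
    filesize,
    -- strip the file name to get the parent directory
    substr(PATH, 1, length(PATH) - length(split(PATH, '/')[size(split(PATH, '/')) - 1]) - 1) AS dir_path
  FROM hdfsinfo.fsimage_info_csv
  WHERE permission NOT LIKE 'd%'   -- files only
    AND filesize < 41943040        -- same <40 MB threshold as above
) t
GROUP BY dir_path
ORDER BY small_file_bytes DESC
LIMIT 500;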
