Splitting large NLP dataset files with Python
NLP data files can be extremely large, so it is often necessary to split one file into N smaller files for processing. Two modes are covered here:
Partial extraction: write at most N chunks, each containing K lines.
Full split: split the entire file into chunks of K lines each.
```python
import os


def split_file(filename, outdir, num_lines):
    """Split a file into chunks of num_lines lines each.

    filename  -- path of the input file
    outdir    -- directory the chunk files are written to
    num_lines -- number of lines per chunk
    """
    base_name = os.path.splitext(os.path.basename(filename))[0]
    with open(filename, 'r') as f:
        current_chunk = 1
        current_line = 0
        current_output = open(os.path.join(outdir, f"{base_name}{current_chunk}.txt"), 'w')
        for line in f:
            current_output.write(line)
            current_line += 1
            if current_line >= num_lines:
                # Current chunk is full: close it and start the next one.
                current_output.close()
                current_chunk += 1
                current_line = 0
                current_output = open(os.path.join(outdir, f"{base_name}{current_chunk}.txt"), 'w')
        current_output.close()
        # If the line count is an exact multiple of num_lines, the last chunk
        # was opened but never written to; remove the empty file.
        last_path = os.path.join(outdir, f"{base_name}{current_chunk}.txt")
        if os.path.getsize(last_path) == 0:
            os.remove(last_path)

def split_file_max_chunks(filename, outdir, num_lines, max_chunks):
    """Split a file into chunks of num_lines lines each, writing at most max_chunks chunks.

    filename   -- path of the input file
    outdir     -- directory the chunk files are written to
    num_lines  -- number of lines per chunk
    max_chunks -- maximum number of chunks to write
    """
    base_name = os.path.splitext(os.path.basename(filename))[0]
    with open(filename, 'r') as f:
        current_chunk = 1
        current_line = 0
        current_output = open(os.path.join(outdir, f"{base_name}{current_chunk}.txt"), 'w')
        for line in f:
            current_output.write(line)
            current_line += 1
            if current_line >= num_lines:
                current_output.close()
                if current_chunk >= max_chunks:
                    # Reached the chunk limit: stop reading the rest of the file.
                    break
                current_chunk += 1
                current_line = 0
                # Start the next chunk file.
                current_output = open(os.path.join(outdir, f"{base_name}{current_chunk}.txt"), 'w')
        current_output.close()  # no-op if the loop exited via break
        # Remove a trailing empty chunk if the input ended exactly on a chunk boundary.
        last_path = os.path.join(outdir, f"{base_name}{current_chunk}.txt")
        if os.path.getsize(last_path) == 0:
            os.remove(last_path)

def main():
    large_filename = "./data/large_file_1G.txt"
    outdir = "./docs/"
    os.makedirs(outdir, exist_ok=True)  # make sure the output directory exists
    num_lines = 1000  # 1000 lines per chunk
    split_file(large_filename, outdir, num_lines)
    # max_chunks = 30  # write at most 30 chunks
    # split_file_max_chunks(large_filename, outdir, num_lines, max_chunks)


if __name__ == "__main__":
    main()
```
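As a quick sanity check, here is a minimal sketch assuming `split_file` above is in the same script; the input path `./data/sample.txt` and the 2,500-line size are hypothetical and only for illustration. It builds a small test file, splits it into 1,000-line chunks, and counts the lines in each resulting chunk.

```python
import os

# Hypothetical smoke test for split_file; paths and line counts are illustrative only.
os.makedirs("./data", exist_ok=True)
os.makedirs("./docs", exist_ok=True)

# Build a small 2,500-line input file.
with open("./data/sample.txt", "w") as f:
    for i in range(2500):
        f.write(f"sentence {i}\n")

split_file("./data/sample.txt", "./docs/", 1000)  # expect sample1.txt, sample2.txt, sample3.txt

# Count the lines in each chunk: should print 1000, 1000, 500.
for name in sorted(os.listdir("./docs")):
    if name.startswith("sample"):
        with open(os.path.join("./docs", name)) as chunk:
            print(name, sum(1 for _ in chunk))
```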