Handling the correspondence between two sets of latitude/longitude extents in Python

python
# -*- coding: utf-8 -*-
import os.path
import json
import pandas as pd
from shapely.geometry import Polygon
from shapely.wkt import dumps
import argparse
import pickle
from glob import glob
import xml.etree.ElementTree as ET
import shutil
from tqdm import tqdm
# Build the command-line parser
parser = argparse.ArgumentParser(description="Match query extents against *.meta.xml image footprints")
# Optional arguments (all have defaults)
parser.add_argument("-q", "--query", default=None, type=str, help="path to the pickled query extents (query.pkl)")
parser.add_argument("-t", "--target", default="./", type=str, help="directory searched recursively for *.meta.xml files")
# NOTE: argparse's type=bool treats any non-empty string as True, so a store_true flag is used instead
parser.add_argument("-c", "--copy", action="store_true", help="copy matched source directories into the output directory")
parser.add_argument("-s", "--copydir", default="./database/", type=str, help="output directory for logs, data.json and copied files")
# Parse the arguments
args = parser.parse_args()

# Build the query dict (region name -> extent) from the Excel workbook and cache it as query.pkl
def getQueryObjects(path):
    query_dict = {}
    # Each sheet stores the region name in column 1 and the extent "minx,miny,maxx,maxy" in column 2
    sheet_names = ['buchongAFB_6deal', 'dituAFB_6deal', 'nanhaiAFB_6deal', 'shijuAFB6_deal']
    for sheet_name in sheet_names:
        df = pd.read_excel(path, sheet_name=sheet_name).values
        row_nums, col_nums = df.shape
        for i in range(row_nums):
            key = df[i, 1]
            extent = df[i, 2]
            extent = [float(_) for _ in str(extent).split(",")]
            query_dict[key] = extent
    # Save the dict to disk in binary mode
    with open('query.pkl', 'wb') as file:
        pickle.dump(query_dict, file)
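
# Note: getQueryObjects() is a one-off preparation step and is not called from __main__.
# Run it once against the Excel workbook to (re)generate query.pkl, e.g. (the workbook
# path below is just an illustrative placeholder):
#   getQueryObjects("query_extents.xlsx")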

# Load the cached query extents and turn each extent into a shapely Polygon
def getPickleQuery(args):
    query_path = args.query
    if query_path is None:
        query_path = './query.pkl'
    # Read the pickled dict back in binary mode
    query_dict = {}
    with open(query_path, 'rb') as file:
        loaded_data = pickle.load(file)
    for key, extent in loaded_data.items():
        # Extent order is assumed to be minx, miny, maxx, maxy of the bounding box
        minx, miny, maxx, maxy = extent
        polygon = Polygon([(minx, miny), (minx, maxy), (maxx, maxy), (maxx, miny)])
        query_dict[key] = polygon
    return query_dict
# Find every *.meta.xml under the target directory and read its footprint polygon
def getTargets(args):
    target_dir = os.path.abspath(args.target)
    print(f"target_dir:{target_dir}")
    # Recursively collect all *.meta.xml files; the pattern is absolute, so the results are too
    target_dict = {}
    files = glob(f'{target_dir}/**/*.meta.xml', recursive=True)
    print(f"files:{files}")
    for file in files:
        print(f"{file}")
        product_id, obj = readXmlFile(file, args)
        if product_id is not None and obj is not None:
            # Key by file path (not ProductID) so the source directory can be recovered later
            target_dict[file] = obj
    return target_dict

# Read the Latitude/Longitude text of a corner element; returns (None, None) if either is missing
def readCoord(element):
    Latitude = element.find('Latitude')
    if Latitude is not None:
        latitude = Latitude.text
    else:
        return None, None
    Longitude = element.find('Longitude')
    if Longitude is not None:
        longitude = Longitude.text
    else:
        return None, None
    return latitude, longitude

# Parse one *.meta.xml file; returns (ProductID, footprint Polygon) or (None, None) on any problem
def readXmlFile(file, args):
    # Parse the XML file into an ElementTree
    tree = ET.parse(file)
    target_dir = os.path.abspath(args.copydir)
    os.makedirs(target_dir, exist_ok=True)
    print(f"target_dir:{target_dir}")
    err_log = os.path.join(target_dir, "error.log")
    # Get the root element; element text is read via .text
    root = tree.getroot()
    product_id = None
    element = root.find('ProductID')
    if element is not None:
        product_id = element.text
    if product_id is None:
        with open(err_log, 'a', encoding="utf-8") as fw:
            fw.write(f"{file} ProductID not found\n")
        return None, None
    element_imginfo = root.find('ImageInfo')
    if element_imginfo is None:
        with open(err_log, 'a', encoding="utf-8") as fw:
            fw.write(f"{file} ImageInfo not found\n")
        return None, None
    element_Corners = element_imginfo.find('Corners')
    if element_Corners is None:
        with open(err_log, 'a', encoding="utf-8") as fw:
            fw.write(f"{file} Corners not found\n")
        return None, None
    # Read the four corner coordinates; any missing corner aborts this file
    corners = {}
    for corner_name in ('UpperLeft', 'UpperRight', 'LowerLeft', 'LowerRight'):
        element = element_Corners.find(corner_name)
        if element is not None:
            latitude, longitude = readCoord(element)
        else:
            latitude, longitude = None, None
        if latitude is None or longitude is None:
            with open(err_log, 'a', encoding="utf-8") as fw:
                fw.write(f"{file} {corner_name} not found\n")
            return None, None
        # Convert to float (shapely expects numeric coordinates) and store as (longitude, latitude)
        # so the footprint lives in the same lon/lat space as the query polygons
        corners[corner_name] = (float(longitude), float(latitude))
    footprint = Polygon([corners['UpperLeft'], corners['UpperRight'],
                         corners['LowerRight'], corners['LowerLeft']])
    return product_id, footprint
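
# Sketch of the metadata layout this parser assumes (element names taken from the code above;
# the root tag name and any extra nesting in real products may differ):
# <root>
#   <ProductID>...</ProductID>
#   <ImageInfo>
#     <Corners>
#       <UpperLeft><Latitude>..</Latitude><Longitude>..</Longitude></UpperLeft>
#       <UpperRight>...</UpperRight>
#       <LowerLeft>...</LowerLeft>
#       <LowerRight>...</LowerRight>
#     </Corners>
#   </ImageInfo>
# </root>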


# Return True if the two polygons overlap (shapely's intersects() already covers the
# contains/within cases, so the extra checks below are belt-and-braces)
def isMatch(polygon1, polygon2):
    ismatch = False
    if polygon1.intersects(polygon2) or polygon1.contains(polygon2) or polygon1.within(polygon2):
        ismatch = True
    return ismatch
# For every query polygon, collect the meta.xml files whose footprint overlaps it
def matchTargets(load_data, target_data, args):
    target_dir = os.path.abspath(args.copydir)
    os.makedirs(target_dir, exist_ok=True)
    print(f"target_dir:{target_dir}")
    match_dict = {}
    for name, query_polygon in load_data.items():
        matchs = []
        for meta_file, footprint in target_data.items():
            ismatch = isMatch(query_polygon, footprint)
            if ismatch:
                # Overlap found: remember this meta.xml file
                matchs.append(meta_file)
        if len(matchs) > 0:
            match_dict[name] = matchs
        else:
            # Log query regions that matched nothing
            file_log = os.path.join(target_dir, "not_match.txt")
            with open(file_log, 'a', encoding="utf-8") as fw:
                fw.write(f"{name} not matched\n")
    return match_dict
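
# Illustrative shape of match_dict / data.json (region names and paths are made up):
# {
#     "regionA": ["/data/scene1/GF_xxx.meta.xml", "/data/scene2/GF_yyy.meta.xml"],
#     "regionB": ["/data/scene3/GF_zzz.meta.xml"]
# }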

if __name__ == '__main__':
    print(f"-q:{args.query}")
    print(f"-t:{args.target}")
    print(f"-c:{args.copy}")
    print(f"-s:{args.copydir}")
    target_dir = os.path.abspath(args.copydir)
    print(f"target_dir:{target_dir}")
    loaded_data = getPickleQuery(args)
    targets_data = getTargets(args)
    match_dict = matchTargets(loaded_data, targets_data, args)
    # Write the match dict out as JSON
    data_json = os.path.join(target_dir, "data.json")
    with open(data_json, "w", encoding='utf-8') as f:
        json.dump(match_dict, f, ensure_ascii=False, indent=4)
    # Optionally copy the matched source directories into the output directory
    copy_log = os.path.join(target_dir, "copy_log.txt")
    print(f"copy_log:{copy_log}")
    iscopy = args.copy
    print(f"iscopy:{iscopy}")
    if iscopy:
        copydir = args.copydir
        os.makedirs(copydir, exist_ok=True)
        for name, values in tqdm(match_dict.items()):
            print(f"deal {name} .......")
            # Destination directory: one sub-directory per query region
            dest_dir = os.path.join(copydir, name)
            # Collect the distinct source directories that contain the matched meta.xml files
            source_dirs = []
            for value in values:
                src_dir = os.path.dirname(value)
                if src_dir not in source_dirs:
                    source_dirs.append(src_dir)
            for source_dir in source_dirs:
                # Copy each source directory, logging every copy
                dir_name = os.path.basename(source_dir)
                dest_path = os.path.join(dest_dir, dir_name)
                with open(copy_log, 'a', encoding="utf-8") as fw:
                    fw.write(f'{source_dir} -> {dest_path}\n')
                print(f"copy src:{source_dir} dest:{dest_path}")
                shutil.copytree(source_dir, dest_path)
    print("OK!")
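
A minimal way to run the script, assuming query.pkl was produced beforehand with getQueryObjects() and the *.meta.xml files sit somewhere under ./data (the script filename and both paths here are placeholders):

    python match_extents.py -q ./query.pkl -t ./data -c -s ./database/

The script writes data.json (query name mapped to the list of matching meta.xml paths), not_match.txt and error.log into the directory given by -s, and, because -c is set, also copies each matched source directory into a per-query sub-directory there.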