Matching two sets of latitude/longitude extents in Python

The script below loads a set of named bounding-box extents from a pickle file, recursively scans a directory tree for *.meta.xml metadata files, builds a footprint polygon from the four corner coordinates in each file, and records which metadata files overlap each query extent. The matches are written to data.json, and the matched source directories can optionally be copied into an output directory.

Python code:
# -*- coding: utf-8 -*-
import os.path
import json
import pandas as pd
from shapely.geometry import Polygon
from shapely.wkt import dumps
import argparse
import pickle
from glob import glob
import xml.etree.ElementTree as ET
import shutil
from tqdm import tqdm
# Build the command-line parser
parser = argparse.ArgumentParser(description="Example program: match query extents against *.meta.xml footprints")
# Optional arguments
parser.add_argument("-q", "--query", default=None, type=str, help="path to the query pickle file (default: ./query.pkl)")
parser.add_argument("-t", "--target", default="./", type=str, help="directory searched recursively for *.meta.xml files")
# argparse's type=bool treats any non-empty string as True, so use a store_true flag instead
parser.add_argument("-c", "--copy", default=False, action="store_true", help="copy matched source directories into --copydir")
parser.add_argument("-s", "--copydir", default="./database/", type=str, help="output directory for logs, data.json, and copied data")
# Parse the arguments
args = parser.parse_args()

# Build the query dictionary from the Excel workbook and save it as query.pkl
def getQueryObjects(path):
    query_dict = {}
    sheet_names = ['buchongAFB_6deal', 'dituAFB_6deal', 'nanhaiAFB_6deal', 'shijuAFB6_deal']
    for sheet_name in sheet_names:
        df = pd.read_excel(path, sheet_name=sheet_name).values
        row_nums, col_nums = df.shape
        for i in range(row_nums):
            # column 1 holds the name, column 2 holds "minx,miny,maxx,maxy"
            key = df[i, 1]
            extent = df[i, 2]
            extent = [float(_) for _ in str(extent).split(",")]
            query_dict[key] = extent
    # Save the dictionary to query.pkl in binary write mode
    with open('query.pkl', 'wb') as file:
        pickle.dump(query_dict, file)

# Load the query dictionary from the pickle file and turn each extent into a shapely Polygon
def getPickleQuery(args):
    query_path = args.query
    if query_path is None:
        query_path = './query.pkl'
    query_dict = {}
    # Load the dictionary in binary read mode
    with open(query_path, 'rb') as file:
        loaded_data = pickle.load(file)
    for key, extent in loaded_data.items():
        minx, miny, maxx, maxy = extent
        polygon = Polygon([(minx, miny), (minx, maxy), (maxx, maxy), (maxx, miny)])
        query_dict[key] = polygon
    return query_dict

# Recursively collect all *.meta.xml files under the target directory and parse their footprints
def getTargets(args):
    target_dir = os.path.abspath(args.target)
    print(f"target_dir:{target_dir}")
    target_dict = {}
    # glob with an absolute pattern already returns absolute paths
    files = glob(f'{target_dir}/**/*.meta.xml', recursive=True)
    print(f"files:{files}")
    for file in files:
        print(f"{file}")
        product_id, obj = readXmlFile(file, args)
        # key by file path so the source directory can be recovered later
        if product_id is not None and obj is not None:
            target_dict[file] = obj
    return target_dict

# Read a Latitude/Longitude pair from a corner element; return (None, None) if either is missing
def readCoord(element):
    Latitude = element.find('Latitude')
    if Latitude is None or Latitude.text is None:
        return None, None
    Longitude = element.find('Longitude')
    if Longitude is None or Longitude.text is None:
        return None, None
    # convert to float so shapely receives numeric coordinates
    return float(Latitude.text), float(Longitude.text)

# Parse one *.meta.xml file and return (ProductID, footprint Polygon), or (None, None) on any error
def readXmlFile(file, args):
    # Parse the XML file into an ElementTree
    tree = ET.parse(file)
    target_dir = os.path.abspath(args.copydir)
    os.makedirs(target_dir, exist_ok=True)
    print(f"target_dir:{target_dir}")
    err_log = os.path.join(target_dir, "error.log")
    # Root element of the XML document
    root = tree.getroot()

    def log_error(msg):
        with open(err_log, 'a', encoding="utf-8") as fw:
            fw.write(msg)

    element = root.find('ProductID')
    product_id = element.text if element is not None else None
    if product_id is None:
        log_error(f"{file} ProductID not found\n")
        return None, None
    element_imginfo = root.find('ImageInfo')
    if element_imginfo is None:
        log_error(f"{file} ImageInfo not found\n")
        return None, None
    element_Corners = element_imginfo.find('Corners')
    if element_Corners is None:
        log_error(f"{file} Corners not found\n")
        return None, None
    # Read the four corner coordinates; abort on the first missing one
    corners = {}
    for corner_name in ('UpperLeft', 'UpperRight', 'LowerRight', 'LowerLeft'):
        element = element_Corners.find(corner_name)
        if element is None:
            log_error(f"{file} {corner_name} not found\n")
            return None, None
        latitude, longitude = readCoord(element)
        if latitude is None or longitude is None:
            log_error(f"{file} {corner_name} not found\n")
            return None, None
        # polygon vertices are (longitude, latitude) pairs
        corners[corner_name] = (longitude, latitude)
    return product_id, Polygon([corners['UpperLeft'], corners['UpperRight'],
                                corners['LowerRight'], corners['LowerLeft']])

# Check whether two polygons overlap or one contains the other
def isMatch(polygon1, polygon2):
    # intersects() is already True for containment either way, but keep the explicit checks
    return polygon1.intersects(polygon2) or polygon1.contains(polygon2) or polygon1.within(polygon2)

# For each query polygon, collect the metadata files whose footprints overlap it
def matchTargets(load_data, target_data, args):
    target_dir = os.path.abspath(args.copydir)
    os.makedirs(target_dir, exist_ok=True)
    print(f"target_dir:{target_dir}")
    match_dict = {}
    for name, query_polygon in load_data.items():
        matches = []
        for file_path, obj in target_data.items():
            if isMatch(query_polygon, obj):
                # match found
                matches.append(file_path)
        if len(matches) > 0:
            match_dict[name] = matches
        else:
            # record queries that matched nothing
            file_log = os.path.join(target_dir, "not_match.txt")
            with open(file_log, 'a', encoding="utf-8") as fw:
                fw.write(f"{name} not matched\n")
    return match_dict

if __name__ == '__main__':
    print(f"-q:{args.query}")
    print(f"-t:{args.target}")
    print(f"-c:{args.copy}")
    print(f"-s:{args.copydir}")
    target_dir = os.path.abspath(args.copydir)
    print(f"target_dir:{target_dir}")
    loaded_data = getPickleQuery(args)
    targets_data = getTargets(args)
    match_dict = matchTargets(loaded_data, targets_data, args)
    # Write the match dictionary to a JSON file
    data_json = os.path.join(target_dir, "data.json")
    with open(data_json, "w", encoding='utf-8') as f:
        json.dump(match_dict, f, ensure_ascii=False, indent=4)
    # Optionally copy matched source directories into the output directory
    copy_log = os.path.join(target_dir, "copy_log.txt")
    print(f"copy_log:{copy_log}")
    iscopy = args.copy
    print(f"iscopy:{iscopy}")
    if iscopy:
        copydir = args.copydir
        os.makedirs(copydir, exist_ok=True)
        for name, values in tqdm(match_dict.items()):
            print(f"deal {name} .......")
            # destination directory for this query name
            dest_dir = os.path.join(copydir, name)
            # collect the unique source directories of the matched metadata files
            source_dirs = []
            for value in values:
                src = os.path.dirname(value)
                if src not in source_dirs:
                    source_dirs.append(src)
            for source_dir in source_dirs:
                # copy each source directory; copytree raises if the destination already exists
                dir_name = os.path.basename(source_dir)
                dest_path = os.path.join(dest_dir, dir_name)
                with open(copy_log, 'a', encoding="utf-8") as fw:
                    fw.write(f'{source_dir} -> {dest_path}\n')
                print(f"copy src:{source_dir} dest:{dest_path}")
                shutil.copytree(source_dir, dest_path)
    print("OK!")
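One thing to note: getQueryObjects() builds query.pkl from the Excel workbook, but it is never called in the __main__ block, so the pickle has to be generated once beforehand. A minimal sketch, assuming the script above is saved as match_extents.py and the workbook is called extents.xlsx (both names are placeholders, not taken from the original post):

# build_query.py -- one-off helper to create query.pkl; filenames here are assumptions
from match_extents import getQueryObjects  # "match_extents" is a placeholder module name for the script above

getQueryObjects("extents.xlsx")  # reads the four sheets listed in the function and writes ./query.pkl

# afterwards the matcher can be run, for example:
#   python match_extents.py -q ./query.pkl -t /path/to/metadata -c -s ./database/

With the store_true flag, -c takes no value; omit it to only produce data.json and the log files without copying anything.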