猎聘爬虫(附源码)

废话不多说直接附源码

cookies需要替换成自己的 , 该网站在不登录的情况下只能请求到10页数据 , 想要获得完整数据需要携带登录后的cookies

完整 Python 源码如下:
import requests
import json
from lxml import etree
import os
import openpyxl




# Browser-identical HTTP headers captured from a real Liepin session.
# The X-Fscp-* / X-XSRF-TOKEN values are anti-bot tokens copied from the
# browser's dev tools; if requests start failing, recapture them.
headers = {
    "Accept": "application/json, text/plain, */*",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
    "Content-Type": "application/json;charset=UTF-8",
    "Origin": "https://www.liepin.com",
    "Pragma": "no-cache",
    "Referer": "https://www.liepin.com/",
    "Sec-Fetch-Dest": "empty",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Site": "same-site",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0",
    "X-Client-Type": "web",
    "X-Fscp-Bi-Stat": "{\"location\": \"https://www.liepin.com/zhaopin/?inputFrom=head_navigation&scene=init&workYearCode=0&ckId=jrkiappybgyczm7c2sk5zmfzwgpqpqia\"}",
    "X-Fscp-Fe-Version": "",
    "X-Fscp-Std-Info": "{\"client_id\": \"40108\"}",
    "X-Fscp-Trace-Id": "f22eb671-3c8f-4f94-8b14-e5e7d176be52",
    "X-Fscp-Version": "1.1",
    "X-Requested-With": "XMLHttpRequest",
    "X-XSRF-TOKEN": "hCnGTNiJQfe47qu4x2OChA",
    "sec-ch-ua": "\"Chromium\";v=\"122\", \"Not(A:Brand\";v=\"24\", \"Microsoft Edge\";v=\"122\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Windows\""
}


def spiderData():
    """Crawl the Liepin job-search API, pages 1 through 20.

    POSTs the search form as a compact JSON body for each page and hands
    every decoded response to ``praseData`` for parsing/saving.  Without
    logged-in cookies the site only serves ~10 pages of results.
    """
    # Endpoint and cookies do not change per page — build them once,
    # outside the loop.
    url = "https://api-c.liepin.com/api/com.liepin.searchfront4c.pc-search-job"
    # Fill in your own logged-in cookies as a dict, e.g. {'name': 'value'}.
    # (The original used a string '', which requests only tolerates when
    # it is empty; a dict is the supported type.)
    cookies = {}

    for page in range(1, 21):
        # Search-form payload; only currentPage varies between requests.
        payload = {
            "data": {
                "mainSearchPcConditionForm": {
                    "city": "410",
                    "dq": "410",
                    "pubTime": "",
                    "currentPage": f"{page}",
                    "pageSize": 40,
                    "key": "",
                    "suggestTag": "",
                    "workYearCode": "0",
                    "compId": "",
                    "compName": "",
                    "compTag": "",
                    "industry": "H01$H01",
                    "salary": "",
                    "jobKind": 2,
                    "compScale": "",
                    "compKind": "",
                    "compStage": "",
                    "eduLevel": ""
                },
                "passThroughForm": {
                    "scene": "init",
                    "ckId": "0nbwaavz2gngc40f8xmfp59in6ymulua",
                    "skId": "pf8wezdo0ezilzl4tyd1g4tcoyh43qe9",
                    "fkId": "0nbwaavz2gngc40f8xmfp59in6ymulua",
                    "suggest": None
                }
            }
        }
        body = json.dumps(payload, separators=(',', ':'))
        response = requests.post(url, headers=headers, cookies=cookies,
                                 data=body).json()

        # Bug fix: the original printed page + 1 while actually
        # requesting page, so the progress message was off by one.
        print(f"正在爬取第{page}页")
        praseData(response)


# 这段代码主要用于解析和保存来自招聘网站的职位信息。下面是加上注释后的代码:
def praseData(data):
    """Parse one search-API response and persist each job row.

    For every card in ``jobCardList``: extract company name, link,
    location, salary and title, fetch the job-detail page, scrape the
    description/company profile, then append one row to job.xlsx via
    ``save_data_to_xlsx``.
    """
    # Extract the card list once; the original re-read it on every
    # iteration of a fixed range(0, 40) loop.
    job_card_list = data.get('data', {}).get('data', {}).get('jobCardList', [])

    # Iterate the actual cards rather than a hard-coded 40 slots, so a
    # short final page doesn't fabricate empty rows.
    for item in job_card_list:
        job_info = item.get('job', {})
        comp_name = item.get('comp', {}).get('compName')   # 公司名称
        job_link = job_info.get('link')                    # 职位链接
        place = job_info.get('dq')                         # 工作地点
        salary = job_info.get('salary')                    # 薪资
        job = job_info.get('title')                        # 职位名称

        # Bug fix: the original called requests.get(None) for cards
        # without a link, which raises. Skip such cards instead.
        if not job_link:
            continue

        # Fetch and parse the job-detail page.
        sub_data = requests.get(job_link, headers=headers).text
        xml = etree.HTML(sub_data)

        # Job description block; absent on some pages, so the index
        # lookup may fail (narrowed from the original bare except).
        try:
            details = xml.xpath('//dl[@class="paragraph"]/dd/text()')[0]
        except IndexError:
            details = None

        # 公司简介 — join all profile text fragments into one cell.
        company_profile = ','.join(
            xml.xpath("//div[@class='paragraph-box']/div/text()")
        )

        # Trim everything after "截止日期" (deadline) and collapse
        # whitespace; None when no description was found.
        if details is not None:
            intorducts = ','.join(details.split('截止日期')[0].split())
        else:
            intorducts = None

        # Persist one row to the Excel file.
        job_list = [job, place, salary, comp_name, company_profile, intorducts]
        print(job_list)
        save_data_to_xlsx(job_list)

# 保存到excle表格
def save_data_to_xlsx(data):
    """Append one row to job.xlsx, creating the file with a header row
    on first use."""
    filename = 'job.xlsx'
    name_headers = ['职位', '地点', '薪资', '公司名称', '公司简介', '描述']

    if not os.path.exists(filename):
        # First run: new workbook, write the column headers first.
        workbook = openpyxl.Workbook()
        workbook.active.append(name_headers)
    else:
        # Subsequent runs: reopen the existing file.
        workbook = openpyxl.load_workbook(filename)

    workbook.active.append(data)
    workbook.save(filename)



# Script entry point: kick off the 20-page crawl when run directly.
if __name__ == '__main__':
    spiderData()
相关推荐
Jelena技术达人4 小时前
Java爬虫获取1688关键字 item_search接口返回值详细解析
java·开发语言·爬虫
m0_748256344 小时前
Web 代理、爬行器和爬虫
前端·爬虫
Kai HVZ10 小时前
python爬虫----爬取视频实战
爬虫·python·音视频
B站计算机毕业设计超人10 小时前
计算机毕业设计PySpark+Hadoop中国城市交通分析与预测 Python交通预测 Python交通可视化 客流量预测 交通大数据 机器学习 深度学习
大数据·人工智能·爬虫·python·机器学习·课程设计·数据可视化
小白学大数据12 小时前
高级技术文章:使用 Kotlin 和 Unirest 构建高效的 Facebook 图像爬虫
爬虫·数据分析·kotlin
数据小小爬虫1 天前
利用Java爬虫获取苏宁易购商品详情
java·开发语言·爬虫
小木_.1 天前
【Python 图片下载器】一款专门为爬虫制作的图片下载器,多线程下载,速度快,支持续传/图片缩放/图片压缩/图片转换
爬虫·python·学习·分享·批量下载·图片下载器
lovelin+v175030409661 天前
安全性升级:API接口在零信任架构下的安全防护策略
大数据·数据库·人工智能·爬虫·数据分析
qq_375872691 天前
14爬虫:scrapy实现翻页爬取
爬虫·scrapy