Liepin Job Scraper (Source Code Included)

Without further ado, here is the source code.

The cookies need to be replaced with your own. Without logging in, the site only serves the first 10 pages of results; to get the full data set, the requests must carry the cookies from a logged-in session.
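
If you copy the Cookie header out of your browser's DevTools as one long string, a small helper can turn it into the dict that requests expects. This is a minimal sketch, not part of the original script; the function name and the sample values are placeholders:

python

def cookie_str_to_dict(cookie_str):
    """Turn a raw 'k1=v1; k2=v2' Cookie header string into a dict for requests."""
    cookies = {}
    for pair in cookie_str.split(';'):
        key, sep, value = pair.strip().partition('=')
        if sep:  # skip malformed fragments that have no '='
            cookies[key] = value
    return cookies

# Example with placeholder values:
# cookies = cookie_str_to_dict('__uuid=0000; user_token=xxxx')

The full scraper follows.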

python
import requests
import json
from lxml import etree
import os
import openpyxl


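# Request headers captured from a logged-in browser session; the hard-coded
# values (X-XSRF-TOKEN, X-Fscp-Trace-Id, etc.) belong to that session and
# may need to be replaced with your own.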
headers = {
    "Accept": "application/json, text/plain, */*",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
    "Content-Type": "application/json;charset=UTF-8",
    "Origin": "https://www.liepin.com",
    "Pragma": "no-cache",
    "Referer": "https://www.liepin.com/",
    "Sec-Fetch-Dest": "empty",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Site": "same-site",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0",
    "X-Client-Type": "web",
    "X-Fscp-Bi-Stat": "{\"location\": \"https://www.liepin.com/zhaopin/?inputFrom=head_navigation&scene=init&workYearCode=0&ckId=jrkiappybgyczm7c2sk5zmfzwgpqpqia\"}",
    "X-Fscp-Fe-Version": "",
    "X-Fscp-Std-Info": "{\"client_id\": \"40108\"}",
    "X-Fscp-Trace-Id": "f22eb671-3c8f-4f94-8b14-e5e7d176be52",
    "X-Fscp-Version": "1.1",
    "X-Requested-With": "XMLHttpRequest",
    "X-XSRF-TOKEN": "hCnGTNiJQfe47qu4x2OChA",
    "sec-ch-ua": "\"Chromium\";v=\"122\", \"Not(A:Brand\";v=\"24\", \"Microsoft Edge\";v=\"122\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Windows\""
}


def spiderData():
    # Loop over the result pages (only the first 10 are served without login)
    for i in range(1, 21):
        # Login cookies: paste your own values here as a dict,
        # e.g. built with the cookie_str_to_dict sketch above
        cookies = {}
        # Search API endpoint
        url = "https://api-c.liepin.com/api/com.liepin.searchfront4c.pc-search-job"
        # Request payload: the search filters as sent by the site's own front end
        data = {
            "data": {
                "mainSearchPcConditionForm": {
                    "city": "410",
                    "dq": "410",
                    "pubTime": "",
                    "currentPage": f"{i}",
                    "pageSize": 40,
                    "key": "",
                    "suggestTag": "",
                    "workYearCode": "0",
                    "compId": "",
                    "compName": "",
                    "compTag": "",
                    "industry": "H01$H01",
                    "salary": "",
                    "jobKind": 2,
                    "compScale": "",
                    "compKind": "",
                    "compStage": "",
                    "eduLevel": ""
                },
                "passThroughForm": {
                    "scene": "init",
                    "ckId": "0nbwaavz2gngc40f8xmfp59in6ymulua",
                    "skId": "pf8wezdo0ezilzl4tyd1g4tcoyh43qe9",
                    "fkId": "0nbwaavz2gngc40f8xmfp59in6ymulua",
                    "suggest": None
                }
            }
        }
        # Serialize the payload manually, since Content-Type is application/json
        data = json.dumps(data, separators=(',', ':'))
        response = requests.post(url, headers=headers, cookies=cookies, data=data).json()

        print(f"Scraping page {i}")
        parseData(response)


# Parse the job list in the API response and save each posting.
def parseData(data):
    job_card_list = data.get('data', {}).get('data', {}).get('jobCardList', [])
    for res_json_item in job_card_list:
        # Company name
        comp_name = res_json_item.get('comp', {}).get('compName')

        # Job posting link
        job_link = res_json_item.get('job', {}).get('link')

        # Work location
        place = res_json_item.get('job', {}).get('dq')

        # Salary
        salary = res_json_item.get('job', {}).get('salary')

        # Job title
        job = res_json_item.get('job', {}).get('title')

        # Skip entries that have no detail-page link
        if not job_link:
            continue

        # Fetch the job detail page
        sub_data = requests.get(job_link, headers=headers).text
        # Parse the HTML with etree
        xml = etree.HTML(sub_data)
        # Try to extract the posting details from the detail page
        try:
            details = xml.xpath('//dl[@class="paragraph"]/dd/text()')[0]
        except IndexError:
            details = None

        # Company profile
        companyProfile = xml.xpath("//div[@class='paragraph-box']/div/text()")
        company_profile = ','.join(companyProfile)

        # Posting description: keep the text before the "截止日期" (deadline) marker
        try:
            introduction = details.split('截止日期')[0].split()
            introductions = ','.join(introduction)
        except AttributeError:
            # details is None when no matching node was found
            introductions = None

        # Collect one row and append it to the Excel sheet
        job_list = [job, place, salary, comp_name, company_profile, introductions]
        print(job_list)
        save_data_to_xlsx(job_list)

# Save one row to the Excel workbook, creating it with a header row if needed
def save_data_to_xlsx(data):
    filename = 'job.xlsx'
    name_headers = ['职位', '地点', '薪资', '公司名称', '公司简介','描述']
    if os.path.exists(filename):
        workbook = openpyxl.load_workbook(filename)
        sheet = workbook.active
        sheet.append(data)
    else:
        workbook = openpyxl.Workbook()
        sheet = workbook.active
        # Write the header row first
        sheet.append(name_headers)
        sheet.append(data)
    # Save the Excel workbook
    workbook.save(filename)


if __name__ == '__main__':
    spiderData()
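
Running the script writes the results to job.xlsx in the working directory, one row per posting; the header row (职位, 地点, 薪资, 公司名称, 公司简介, 描述) is created the first time the file is written.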