# 一、 Ajax GET request
# -----------------------------------------------------------------
# Ajax GET request --- Douban movies, first page
# _*_ coding : utf-8 _*_
# @Time : 2025/2/13 15:14
# @Author : 20250206-里奥
# @File : demo23_ajax的get请求
# @Project : PythonProject10-14
import urllib.request

# NOTE(review): IDE auto-import disabled — every name it brought in is
# immediately re-assigned below, and the module may not exist standalone.
# from demo17_qingqiuduixaingdedingzhi import headers, request, response

# GET request: fetch the first page of data and save it.

# Target URL: Douban "top list" Ajax endpoint.
url = "https://movie.douban.com/j/chart/top_list?type=4&interval_id=100%3A90"

# Request headers: a browser User-Agent, or the server rejects the request.
headers = {
    "User-Agent":
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:135.0) Gecko/20100101 Firefox/135.0"
}

# 1. Build the customised request object (lets us attach headers).
request = urllib.request.Request(url=url, headers=headers)

# 2. Fetch the response data.
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')

# Print the data.
print(content)

# 3. Save the data locally.
# open() defaults to the platform encoding (gbk on Chinese Windows); pass
# encoding="utf-8" or writing this content raises UnicodeEncodeError.
fp = open('豆瓣.json', 'w', encoding="utf-8")  # fixed typo: was '豆瓣.jsom'
fp.write(content)
fp.close()  # the original never closed this handle

# Equivalent, preferred form: the context manager closes the file for us.
with open('豆瓣1.json', 'w', encoding='utf-8') as fp:
    fp.write(content)
# -----------------------------------------------------------------
# Ajax GET request --- Douban movies, first ten pages
# _*_ coding : utf-8 _*_
# @Time : 2025/2/13 15:46
# @Author : 20250206-里奥
# @File : demo24_ajax的get请求_豆瓣电影所有页.py
# @Project : PythonProject10-14

# NOTE(review): two IDE auto-imports disabled — "Tools.scripts.generate_opcode_h"
# is a CPython build helper that does not belong here, and the demo17 import
# only brings in names that are shadowed by the definitions below.
# from Tools.scripts.generate_opcode_h import header
# from demo17_qingqiuduixaingdedingzhi import request, response, content

# Sample of a fully expanded page URL (the real URL is built per page below):
# url = ("https://movie.douban.com/j/chart/top_list?type=4&interval_id=100%3A90&action=&start=4&limit=20")

import urllib.parse
import urllib.request

# Download the first ten pages of Douban movie data:
#   1. build the customised request object
#   2. fetch the response data
#   3. save the data to disk
def create_request(page):
    """Build the customised GET Request for one page of the Douban top list.

    :param page: 1-based page number; translated into the ``start`` offset.
    :return: urllib.request.Request ready for urlopen().
    """
    # BUG FIX: the original base URL already ended in "start=4&limit=20" and the
    # urlencoded paging parameters were concatenated with no separator,
    # producing a malformed URL with duplicated parameters. The base now ends
    # at "action=&" so the encoded parameters are appended cleanly.
    basic_url = "https://movie.douban.com/j/chart/top_list?type=4&interval_id=100%3A90&action=&"

    # Paging parameters: each page holds 20 entries.
    data = {
        'start': (page - 1) * 20,
        'limit': 20,
    }
    url = basic_url + urllib.parse.urlencode(data)
    # print(url)

    headers = {
        "User-Agent":
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:135.0) Gecko/20100101 Firefox/135.0"
    }
    # Customise the request object so the headers travel with it.
    request = urllib.request.Request(url=url, headers=headers)
    return request
# Fetch the page source for a prepared request.
def get_content(request):
    """Send *request* and return the response body decoded as utf-8 text."""
    # `with` closes the connection deterministically — the original leaked the
    # response handle.
    with urllib.request.urlopen(request) as response:
        return response.read().decode('utf-8')
def down_load(page, content):
    """Save one page's JSON payload to 豆瓣_<page>.json (utf-8).

    :param page: page number used in the output filename.
    :param content: decoded response body to write.
    """
    # '+' needs str operands on both sides, hence str(page).
    with open('豆瓣_' + str(page) + '.json', 'w', encoding='utf-8') as fp:
        fp.write(content)
# Program entry point (only runs when executed directly, not on import).
if __name__ == '__main__':
    start_page = int(input("请输入起始页码:"))
    end_page = int(input("请输入结束页码:"))
    # Walk the inclusive page range.
    for page in range(start_page, end_page + 1):
        # print(page)
        # Every page gets its own customised request object.
        request = create_request(page)
        # Fetch the response body.
        content = get_content(request)
        # Save it to disk.
        down_load(page, content)
# 二、 Ajax POST request
# _*_ coding : utf-8 _*_
# @Time : 2025/2/14 09:05
# @Author : 20250206-里奥
# @File : demo25_ajax的post请求_获取杭州KFC餐厅信息
# @Project : PythonProject10-14

# POST request: the paging parameters travel in the request body.
# Page 1:
#   https://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=cname
#   cname: 杭州
#   pid:
#   pageIndex: 1
#   pageSize: 10
# Page 2:
#   https://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=cname
#   cname: 杭州
#   pid:
#   pageIndex: 2
#   pageSize: 10

# 1. Imports.
import urllib.request

# NOTE(review): IDE auto-imports disabled — every name they bring in is
# shadowed by the definitions in this module, so they only added a pointless
# (and possibly broken) cross-demo dependency.
# from demo20_url的urlencode方法 import basic_url, request
# from demo24_ajax的get请求_豆瓣电影所有页 import create_request, down_load

# 2. Base URL (defined inside create_request below).
# basic_url = "https://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=cname"
# 3. Parameters (built per page inside create_request).
# Define create_request.
def create_request(page):
    """Build the POST request for one page of Hangzhou KFC store data.

    :param page: 1-based page number, sent as the ``pageIndex`` form field.
    :return: urllib.request.Request carrying the form data in its body.
    """
    # Encoding helper (kept as a function-local import, tutorial style).
    import urllib.parse

    basic_url = "https://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=cname"

    # Form fields: pageIndex selects the page, pageSize the page length.
    data = {
        "cname": "杭州",
        "pid": '',
        'pageIndex': page,
        'pageSize': '10'
    }
    # POST bodies must be bytes: urlencode to a query string, then utf-8 encode.
    payload = urllib.parse.urlencode(data).encode('utf-8')

    headers = {
        "User-Agent":
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:135.0) Gecko/20100101 Firefox/135.0"
    }
    # Supplying data= makes urllib issue a POST instead of a GET.
    return urllib.request.Request(url=basic_url, headers=headers, data=payload)
# Fetch the page.
# 4) Receive the request object built by create_request.
def get_content(request):
    """Send *request* and return the response body decoded as utf-8 text."""
    # 5) Use the request; `with` closes the connection — the original leaked
    # the response handle.
    with urllib.request.urlopen(request) as response:
        return response.read().decode('utf-8')
# Save one page of results to disk.
def down_load(page, content):
    """Write *content* to KFC_<page>.json using utf-8."""
    target = 'KFC_' + str(page) + '.json'
    with open(target, 'w', encoding='utf-8') as fp:
        fp.write(content)
# Program entry point (skipped when this module is imported).
if __name__ == '__main__':
    # Ask for the inclusive page range to download.
    start_page = int(input('请输入起始页码:'))
    end_page = int(input('请输入结束页码:'))
    # Process every requested page in order.
    for page in range(start_page, end_page + 1):
        print(page)
        # 2) Build this page's customised request object.
        request = create_request(page)
        # 3) Hand the request over and fetch the page source.
        content = get_content(request)
        # Save the page to disk.
        down_load(page, content)
# PyCharm shortcut to reformat the saved .json file: Ctrl + Alt + L
