百度翻译:利用爬虫技术模拟人工查询英文单词,将查到的信息保存到本地
python
import requests
import json
# 1. Target endpoint: Baidu Translate's suggestion (sug) API.
post_url = 'https://fanyi.baidu.com/sug'
# 2. UA header so the request looks like a normal desktop-browser visit.
headers = {
    "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36'
}
# 3. POST payload: the body of the HTTP request. A "payload" can be
#    form data, JSON, files, etc.; here it is a simple form field.
word = input('请输入中文字词:')
data = {
    'kw': word
}
# 4. Send the request; fail fast on HTTP errors instead of silently
#    saving an error page to disk.
response = requests.post(url=post_url, headers=headers, data=data)
response.raise_for_status()
# 5. Decode the JSON response body (fixed typo: result_obi -> result_obj).
result_obj = response.json()
print(result_obj)
# 6. Persist the data. `with` guarantees the file handle is closed even
#    if json.dump raises (the original leaked the handle on error).
filename = word + '.json'
with open('D:/Pythonstudy/python爬虫/百度翻译/' + filename, 'w', encoding='utf-8') as f:
    json.dump(result_obj, f, ensure_ascii=False)
print("success")
网页采集器:输入关键字,获取目标网页关于关键字的信息
python
import requests

# Browser-style User-Agent so the request is not flagged as a bot.
ua_headers = {
    "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36'
}
# Base search URL; the actual query string is attached via `params`.
base_url = 'https://sogou.com/web?'
query_params = {
    'query': '贾康康'
}
# Fetch the search-result page and save its HTML to a local file.
resp = requests.get(base_url, params=query_params, headers=ua_headers)
with open('sogou.html', 'w', encoding='utf-8') as out:
    out.write(resp.text)
肯德基:post请求的应用,输入目标地点,获取目标地点的相关门店信息
python
import requests

# POST endpoint for KFC's store-locator (keyword search mode).
url = 'https://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword'
# Pretend to be a regular desktop browser.
headers = {
    "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36'
}
# Form fields: search by keyword, first page, 20 results per page.
data = {
    'cname': '',
    'pid': '',
    'keyword': '兰州',
    'pageIndex': '1',
    'pageSize': '20',
}
# Fire the POST request and print the decoded JSON store list.
response = requests.post(url=url, headers=headers, data=data)
result = response.json()
print(result)
豆瓣电影排行榜信息爬取:通过指定请求头的参数,来获取想要的排行榜的区间
python
import json
import requests
# 1. Douban's AJAX endpoint for ranked movie lists.
douban_url = 'https://movie.douban.com/j/chart/top_list'
# 2. Query parameters. `interval_id` selects the score percentile band;
#    `start`/`limit` page through the ranking.
#    NOTE(review): `start` is '1', which presumably skips the first
#    (index-0) entry of the list — confirm this offset is intentional.
data = {
    'type': '25',
    'interval_id': '100:90',
    'action': '',
    'start': '1',
    'limit': '20'
}
# 3. UA header so the request looks like a normal browser visit.
headers = {
    "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36'
}
# 4. Send the GET request (if the address bar changed on the site, it
#    would not be an AJAX request); fail fast on HTTP errors instead of
#    writing an error payload to disk.
response = requests.get(url=douban_url, headers=headers, params=data)
response.raise_for_status()
# 5. Decode the JSON response body.
result = response.json()
print(result)
# 6. Persist the data. `with` guarantees the file handle is closed even
#    if json.dump raises (the original leaked the handle on error).
with open('D:/Pythonstudy/python爬虫/豆瓣电影/douban.json', 'w', encoding='utf-8') as f:
    json.dump(result, f, ensure_ascii=False)