The requests module: a third-party Python module for sending network requests. It is powerful, simple and convenient to use, and highly efficient.
Purpose: simulate a browser sending requests.
The workflow of a requests-based crawl:
1. Specify the URL
2. Send the request
3. Get the response data
4. Persist (store) the data
# Environment setup
pip install requests
# Hands-on example: crawl the Sogou homepage data
import requests

if __name__ == "__main__":
    # step 1: specify the url
    url = "https://www.sogou.com/"
    # step 2: send the request; get() returns a response object
    response = requests.get(url=url)
    # step 3: get the response data; text returns the response data as a string
    page_text = response.text
    print(page_text)
    # step 4: persist the data
    with open('D:/Notes/python爬虫/sogou.html', 'w', encoding='utf-8') as fp:
        fp.write(page_text)
    print('Crawl finished!')
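Besides text, the response object exposes a few other commonly used attributes. A minimal sketch of the ones that come up most often (all of these are part of the requests response API):

import requests

response = requests.get(url='https://www.sogou.com/')
print(response.status_code)      # HTTP status code, e.g. 200
print(response.encoding)         # encoding inferred from the response headers
print(response.headers)          # response headers as a dict-like object
binary_data = response.content   # raw response body as bytes (useful for images/files)
page_text = response.text        # response body decoded to a string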
# Example: crawl the Sogou search results for a given keyword (GET request with parameters)
import requests

if __name__ == "__main__":
    url = 'https://www.sogou.com/web'
    # handle the parameters carried by the url: pack them into a dict
    kw = input('enter a word: ')
    param = {
        'query': kw
    }
    # send the request to the url; the url carries parameters, which are processed during the request
    response = requests.get(url=url, params=param)
    page_text = response.text
    fileName = 'D:/Notes/python爬虫/' + kw + '.html'
    with open(fileName, 'w', encoding='utf-8') as fp:
        fp.write(page_text)
    print(fileName, "saved successfully!")
UA detection: a portal site's server inspects the User-Agent (the identity of the request carrier) of every request it receives. If the User-Agent identifies a known browser, the request is treated as a normal request; if it does not come from a browser, the request is treated as abnormal (a crawler), and the server may reject it.
UA spoofing: make the crawler's requests carry the User-Agent of a real browser.
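Why spoofing is necessary: by default, requests announces itself as a Python client rather than a browser, which a UA check can flag immediately. A minimal sketch of the contrast (the exact default string depends on the installed requests version):

import requests

# Default headers sent by requests: the User-Agent is 'python-requests/x.y.z',
# which is easy for a server to identify as a crawler.
print(requests.utils.default_headers()['User-Agent'])

# With UA spoofing, the request carries a browser User-Agent instead.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'
}
response = requests.get('https://www.sogou.com/', headers=headers)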
# Example: the same Sogou keyword crawl, now with UA spoofing
import requests

if __name__ == "__main__":
    # UA spoofing: pack the browser's User-Agent into a dict
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'
    }
    url = 'https://www.sogou.com/web'
    # handle the parameters carried by the url: pack them into a dict
    kw = input('enter a word: ')
    param = {
        'query': kw
    }
    # send the request to the url; the url carries parameters, which are processed during the request
    response = requests.get(url=url, params=param, headers=headers)
    page_text = response.text
    fileName = 'D:/Notes/python爬虫/' + kw + '.html'
    with open(fileName, 'w', encoding='utf-8') as fp:
        fp.write(page_text)
    print(fileName, "saved successfully!")
POST request (carrying parameters)
The response data is JSON
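The main difference from a GET request is where the parameters go: params= appends them to the URL as a query string, while a POST request carries them in the request body. A minimal sketch of the two common body formats (httpbin.org is used here only as a neutral test endpoint, not part of the original examples):

import requests

payload = {'kw': 'dog'}

# data= sends the parameters as a form-encoded request body
# (Content-Type: application/x-www-form-urlencoded); the examples below use this form
r1 = requests.post('https://httpbin.org/post', data=payload)

# json= serializes the parameters into a JSON request body (Content-Type: application/json)
r2 = requests.post('https://httpbin.org/post', json=payload)

# If the server responds with JSON, json() parses it into a Python object
print(r1.json())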
# Example: crawl the Baidu Fanyi suggestion data (POST request returning JSON)
import requests
import json

if __name__ == "__main__":
    # 1. specify the url
    post_url = 'https://fanyi.baidu.com/sug'
    # 2. UA spoofing
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'
    }
    # 3. process the POST request parameters (same as for a GET request)
    word = input('enter a word:')
    data = {
        'kw': word
    }
    # 4. send the request
    response = requests.post(url=post_url, data=data, headers=headers)
    # 5. get the response data: json() returns a Python object
    #    (only use json() if the response data is confirmed to be JSON)
    dic_obj = response.json()
    # 6. persist the data
    fileName = 'D:/Notes/python爬虫/' + word + '.json'
    with open(fileName, 'w', encoding='utf-8') as fp:
        json.dump(dic_obj, fp=fp, ensure_ascii=False)
    print("over!")
# Example: crawl the Douban movie ranking data (GET request returning JSON)
import requests
import json

if __name__ == "__main__":
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'
    }
    url = 'https://movie.douban.com/j/chart/top_list'
    param = {
        'type': '24',
        'interval_id': '100:90',
        'action': '',
        'start': '0',    # index of the first movie to fetch from the ranking
        'limit': '20',   # number of movies to fetch in one request
    }
    response = requests.get(url=url, params=param, headers=headers)
    list_data = response.json()
    fileName = 'D:/Notes/python爬虫/douban.json'
    with open(fileName, 'w', encoding='utf-8') as fp:
        json.dump(list_data, fp=fp, ensure_ascii=False)
    print("over!")
# Example: crawl the KFC store locations for a given keyword (POST request)
import requests

if __name__ == "__main__":
    # 1. specify the url (the store-query interface)
    post_url = 'http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword'
    # 2. UA spoofing
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'
    }
    # 3. process the POST request parameters (same as for a GET request)
    word = input('enter a word:')
    data = {
        'cname': '',
        'pid': '',
        'keyword': word,
        'pageIndex': '1',
        'pageSize': '10',
    }
    # 4. send the request
    response = requests.post(url=post_url, data=data, headers=headers)
    # the response body is saved as text here
    page_text = response.text
    fileName = 'D:/Notes/python爬虫/' + word + '.html'
    with open(fileName, 'w', encoding='utf-8') as fp:
        fp.write(page_text)
    print("over!")
Example: batch-crawl company detail data from the NMPA site (scxk.nmpa.gov.cn). The list and detail interfaces are:
http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsList
http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsById
Observation: all of the POST request URLs are the same; only the id value differs. If we can batch-fetch the ids of many companies, we can then fetch each company's detail data.
import requests
import json

if __name__ == "__main__":
    # batch-fetch the ids of different companies
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'
    }
    url = 'http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsList'
    id_list = []        # stores the company ids
    all_data_list = []  # stores the detail data of all companies
    # pack the parameters: iterate over the first 5 pages of the list
    for page in range(1, 6):
        page = str(page)
        data = {
            'on': 'true',
            'page': page,
            'pageSize': '15',
            'productName': '',
            'conditionType': '1',
            'applyname': '',
        }
        json_ids = requests.post(url=url, data=data, headers=headers).json()
        for dic in json_ids['list']:
            id_list.append(dic['ID'])
    # fetch the detail data of each company
    post_url = 'http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsById'
    for id in id_list:
        data = {
            'id': id
        }
        detail_json = requests.post(url=post_url, data=data, headers=headers).json()
        all_data_list.append(detail_json)
    # persist the data
    with open('./python爬虫/allData.json', 'w', encoding='utf-8') as fp:
        json.dump(all_data_list, fp=fp, ensure_ascii=False)
    print('over!')