If you write crawlers, you have surely run into IP bans, and free proxy IPs are hard to find online these days. So here we use Python's requests library to scrape proxy IPs from xicidaili.com and build an IP proxy pool for later use.

The code covers scraping the IPs, checking whether each one works, and saving the usable ones. You can then obtain a proxy through the function get_proxies, e.g. {'HTTPS': '106.12.7.54:8118'}.
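
To make that return value concrete, here is a minimal sketch of plugging such a dict into requests (the proxy address is just the example above and may well be dead; note that requests looks proxy keys up case-sensitively against the lowercase URL scheme, so it is safest to normalise the key first):

import requests

# get_proxies() yields a dict like {'HTTPS': '106.12.7.54:8118'};
# lowercase the key so requests actually routes through the proxy
proxies = {k.lower(): v for k, v in {'HTTPS': '106.12.7.54:8118'}.items()}
res = requests.get('https://www.baidu.com', proxies=proxies, timeout=2)
print(res.status_code)  # 200 if the proxy worked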

Below is the source code, with detailed comments:

import requests
from lxml import etree
from requests.packages import urllib3
import random, time

urllib3.disable_warnings()

def spider(pages, max_change_proxies_times=300):
    """
    Scrape the protocol type, proxy IP and port number from XiciDaili.com

    Each scraped entry is passed to check_proxies(), which tests it and saves
    the usable proxies to ips_pool.csv.
    -----
    :param pages: number of pages to scrape
    :return: None
    """
    s = requests.session()
    s.trust_env = False
    s.verify = False
    urls = 'https://www.xicidaili.com/nn/{}'
    proxies = {}
    try_times = 0
    for i in range(pages):
        url = urls.format(i + 1)
        s.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Referer': urls.format(i if i > 0 else ''),
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36'}
        while True:
            content = s.get(url, headers=s.headers, proxies=proxies)
            time.sleep(random.uniform(1.5, 4))  # pause after every page request, otherwise the site bans the IP
            if content.status_code == 503:  # 503 means the current IP is banned, so switch to another proxy
                proxies = get_proxies()
                try_times += 1
                print(f'Proxy change no. {str(try_times):0>3s}, current proxy: {proxies}')
                if try_times > max_change_proxies_times:
                    print('Exceeded the maximum number of attempts, connection failed!')
                    return -1
                continue
            else:
                break  # status code 200: leave the while loop and parse the page we fetched

        print(f'Scraping page {i + 1} of {pages}')
        for j in range(2, 102):  # extract protocol, host and port with simple XPath expressions
            tree = etree.HTML(content.text)
            http = tree.xpath(f'//table[@id="ip_list"]/tr[{j}]/td[6]/text()')[0]
            host = tree.xpath(f'//table[@id="ip_list"]/tr[{j}]/td[2]/text()')[0]
            port = tree.xpath(f'//table[@id="ip_list"]/tr[{j}]/td[3]/text()')[0]
            check_proxies(http, host, port)  # check whether the extracted proxy IP works

def check_proxies(http, host, port, test_url='http://www.baidu.com'):
    """
    Test whether the given proxy information is usable

    Builds a proxies dict from http, host and port, then runs a connection test
    against test_url; if the test passes, the proxy is saved to ips_pool.csv.
    :param http: protocol type
    :param host: host address
    :param port: port number
    :param test_url: URL used for the connection test
    :return: None
    """
    proxies = {http: host + ':' + port}
    try:
        res = requests.get(test_url, proxies=proxies, timeout=2)
        if res.status_code == 200:
            print(f'{proxies} passed the check')
            with open('ips_pool.csv', 'a+') as f:
                f.write(','.join([http, host, port]) + '\n')
    except Exception as e:  # if the check fails, skip saving; don't let the error break the program
        print(e)

def check_local_ip(fn, test_url):
    """
    Check whether the proxy IPs stored in the local pool are still usable

    Reads fn line by line and runs a connection test against test_url for each
    entry; the proxies that pass are saved to ips_pool.csv.
    :param fn: filename of the file that stores the proxy IPs
    :param test_url: URL used for the connection test
    :return: None
    """
    with open(fn, 'r') as f:
        datas = f.readlines()
    ip_pools = []
    for data in datas:
        # time.sleep(1)
        ip_msg = data.strip().split(',')
        http = ip_msg[0]
        host = ip_msg[1]
        port = ip_msg[2]
        proxies = {http: host + ':' + port}
        try:
            res = requests.get(test_url, proxies=proxies, timeout=2)
            if res.status_code == 200:
                ip_pools.append(data)
                print(f'{proxies} passed the check')
                with open('ips_pool.csv', 'a+') as f:
                    f.write(','.join([http, host, port]) + '\n')
        except Exception as e:
            print(e)
            continue

def get_proxies(ip_pool_name='ips_pool.csv'):
    """
    Get a random proxy IP from the pool
    :param ip_pool_name: str, filename of the IP pool
    :return: a proxies dict, e.g. {'HTTPS': '106.12.7.54:8118'}
    """
    with open(ip_pool_name, 'r') as f:
        datas = f.readlines()
    ran_num = random.choice(datas)
    ip = ran_num.strip().split(',')
    proxies = {ip[0]: ip[1] + ':' + ip[2]}
    return proxies

if __name__ == '__main__':
    t1 = time.time()
    spider(pages=3400)
    t2 = time.time()
    print('Scraping finished, elapsed time:', t2 - t1)

    # check_local_ip('raw_ips.csv', 'http://www.baidu.com')
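
Once ips_pool.csv has been populated, the sketch below shows one way to wire get_proxies into an actual crawl. fetch_via_pool and its retries parameter are illustrative names, not part of the original script, and the helper assumes it lives in the same file as get_proxies:

def fetch_via_pool(url, retries=5):
    """Try up to `retries` random proxies from the pool before giving up."""
    for _ in range(retries):
        # lowercase the protocol key so requests actually routes through the proxy
        proxies = {k.lower(): v for k, v in get_proxies().items()}
        try:
            res = requests.get(url, proxies=proxies, timeout=2)
            if res.status_code == 200:
                return res
        except Exception:
            continue  # dead proxy; draw another one from the pool
    return None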
