Python Web Scraping in Practice, Part 3: Scraping Graduate Advisor Profiles


1 Basic version
1.1 Fetch the list-page links and visit them one by one
This isn't very different from the previous two exercises, so this time I tried modular programming instead of the script-style code I wrote before. I'm writing this post after finishing the code, so I'll only briefly note the problems I ran into while coding and how I solved them.


The data returned by the requests call contained garbled Chinese characters.
Looking at the page carefully, its source starts with:

<html lang="zh">
<head>
...

My guess was that this was an encoding problem again, so in the request function, right after the requests call and before returning the text, I set the encoding explicitly:

# fix for garbled Chinese characters
resp.encoding = 'utf-8'
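
If you are not sure of a page's encoding ahead of time, a small alternative sketch is to let requests guess it from the response body instead of hard-coding utf-8 (this is not what the final script does):

import requests

resp = requests.get('http://www.kaoyan.com/daoshi/')
# apparent_encoding is detected from the response body, which is usually more
# reliable than the header-based default when the server omits the charset
resp.encoding = resp.apparent_encoding
print(resp.encoding)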


The extracted data contains many control and whitespace characters such as '\r', '\t', '\n', and '\u3000'. The fix is to run a chain of .replace() calls on the text after the request, before returning the data:

resp = resp.text.replace('\t', '') \
    .replace('\r', '') \
    .replace('\n', '') \
    .replace('\u3000', '')
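
Chaining replace() calls works; just as a sketch, the same cleanup can be done in one pass with a regular expression (the exact character set below is my own choice, based on the characters listed above):

import re

def clean_text(raw):
    # strip tabs, carriage returns, newlines, ideographic spaces and non-breaking spaces
    return re.sub(r'[\t\r\n\u3000\xa0]', '', raw)

# usage: resp = clean_text(resp.text)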

Final code for the basic version:

import time
import requests
from lxml import etree

start_url = 'http://www.kaoyan.com/daoshi/'
titles = []
context = []
data = {}


# fetch a URL and return the cleaned page text
def get_resp(url):
    ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' \
         'AppleWebKit/537.36 (KHTML, like Gecko) ' \
         'Chrome/80.0.3987.116 Safari/537.36'
    header = {'User-Agent': ua}
    resp = requests.get(url, headers=header)
    # fix for garbled Chinese characters
    resp.encoding = 'utf-8'
    if resp.status_code == 200:
        # return resp.text
        resp = resp.text.replace('\t', '').replace('\r', '').replace('\n', '').replace('\u3000', '')
        return resp
    else:
        print(url + ' request failed')


def parse_teacher(resp_page):

    et = etree.HTML(resp_page)
    if et is not None:
        # extract the title
        selectors = et.xpath('//h1')
        titles.append(selectors[0].text)

        # extract the profile text
        selectors2 = et.xpath("//div[@class='articlecon']//p/text()")
        text = ''
        for s in selectors2:
            text += s
        context.append(text)

    else:
        print('Found an abnormal page, skipped')


# parse the list page and visit each link in turn
def link_parse(resp):
    et = etree.HTML(resp)
    links = et.xpath("//ul[@class='list areazslist']/li//a/@href")
    for link in links:
        resp_page = get_resp(link)
        parse_teacher(resp_page)


def make_and_print():
    data.update(zip(titles, context))
    print(data)


if __name__ == '__main__':
    start_time = time.time()

    # fetch the start url
    resp = get_resp(start_url)
    # parse the links and download each page
    link_parse(resp)
    # build the final dict and print it
    make_and_print()

    last_time = time.time() - start_time
    print(last_time)

The output looks like:

(screenshot: v1 result)

1.2 Fetching the next page
Scraping only one page of this kind of data is obviously not enough. The next step is to check whether the page has a "next page" link; if it does, keep loading the next page. The core code is:

# handle additional pages
next_url = et.xpath('//div[4]/a[11]/@href')
if next_url:
    print('next page:', next_url[0])
    r = get_resp(next_url[0])
    link_parse(r)
else:
    print('All list pages loaded, now downloading each advisor profile, please wait...')
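
The positional expression //div[4]/a[11] is fragile and breaks as soon as the pager layout changes. As a sketch (assuming the pager link literally contains the text '下一页', which I have not verified against the live page), matching the link by its text tends to be more robust:

from lxml import etree

# Return the next-page URL if an anchor whose text contains '下一页' exists, else None.
def find_next_page(page_html):
    et = etree.HTML(page_html)
    # text-based match instead of the brittle positional //div[4]/a[11]
    hits = et.xpath("//a[contains(., '下一页')]/@href")
    return hits[0] if hits else None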

1.3 Saving to text files
Save each advisor's profile to a text file, using its title as the filename.

def save_data(path, dicta):
    if not os.path.exists(path):
        os.mkdir(path)
    os.chdir(path)
    for k, v in dicta.items():
        filename = k + '.txt'
        file_context = v
        f = open(filename, 'w+', encoding='utf-8')
        f.write(file_context)
        f.seek(0)
        f.close()
        print(k, 'saved!')
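
Opening and closing the file by hand works, but as a sketch, a with block closes the file even if writing fails, and stripping characters that are illegal in Windows filenames avoids crashes on odd titles (the sanitizing regex and the helper name are my additions, not part of the original script):

import os
import re

def save_data_safe(path, dicta):
    os.makedirs(path, exist_ok=True)  # create the output directory if needed
    for title, text in dicta.items():
        # drop characters that are not allowed in Windows filenames
        safe_name = re.sub(r'[\\/:*?"<>|]', '', title)
        with open(os.path.join(path, safe_name + '.txt'), 'w', encoding='utf-8') as f:
            f.write(text)
        print(title, 'saved')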

The finished result:

(screenshots: final result 1, final result 2, final result 3)

That fully meets the requirement, and part one is done. The complete code for part one:

import time
import os
import requests
from lxml import etree

start_url = 'http://www.kaoyan.com/daoshi/'
titles = []
context = []
data = {}


# fetch a URL and return the cleaned page text
def get_resp(url):
    ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' \
         'AppleWebKit/537.36 (KHTML, like Gecko) ' \
         'Chrome/80.0.3987.116 Safari/537.36'
    header = {'User-Agent': ua}
    resp = requests.get(url, headers=header)
    # fix for garbled Chinese characters
    resp.encoding = 'utf-8'
    if resp.status_code == 200:
        # return resp.text
        resp = resp.text.replace('\t', '') \
            .replace('\r', '') \
            .replace('\n', '') \
            .replace('\u3000', '') \
            .replace('\xa0', '')
        return resp
    else:
        print(url + ' request failed')


def parse_teacher(resp_page):
    et = etree.HTML(resp_page)
    if et is not None:
        # extract the title
        selectors = et.xpath('//h1')
        titles.append(selectors[0].text)

        # extract the profile text
        selectors2 = et.xpath("//div[@class='articlecon']//p/text()")
        text = ''
        for s in selectors2:
            text += s
        context.append(text)

    else:
        print('Found an abnormal page, skipped')


# parse the list page and visit each link in turn
def link_parse(resp):
    et = etree.HTML(resp)
    links = et.xpath("//ul[@class='list areazslist']/li//a/@href")
    # handle additional pages
    next_url = et.xpath('//div[4]/a[11]/@href')
    if next_url:
        print('next page:', next_url[0])
        r = get_resp(next_url[0])
        link_parse(r)
    else:
        print('All list pages loaded, now downloading each advisor profile, please wait...')
    for link in links:
        resp_page = get_resp(link)
        parse_teacher(resp_page)


def make_and_print():
    data.update(zip(titles, context))
    print(data)


def save_data(path, dicta):
    if not os.path.exists(path):
        os.mkdir(path)
    os.chdir(path)
    for k, v in dicta.items():
        filename = k + '.txt'
        file_context = v
        f = open(filename, 'w+', encoding='utf-8')
        f.write(file_context)
        f.seek(0)
        f.close()
        print(k, 'saved!')


if __name__ == '__main__':
    start_time = time.time()
    # save_data('./data_out', data)

    # fetch the start url
    resp = get_resp(start_url)
    # parse the links and download each page
    link_parse(resp)
    # build the final dict and print it
    make_and_print()
    save_data('./data_out', data)

    print(len(titles))
    last_time = time.time() - start_time
    print(last_time)

2. Multithreaded version
The data set is small and the single-threaded run does not take long, but I still did not feel I fully understood the multithreading covered earlier, so I wrote another demo to practice.
First import the threading standard-library module (note the trailing "ing" in the name), and import Queue from queue to hold the link queue.

import threading
from queue import Queue

Define a variable to control the number of threads, a list to hold the thread objects, and a queue for the links:

thread_num = 10
threads = []
links_queue = Queue()

In the link-parsing function, put every link into the queue instead of downloading it immediately:

for link in links:
    links_queue.put(link)

Create the threads in the main block:

for t in range(thread_num):
    t = threading.Thread(target=download)
    t.start()
    threads.append(t)

Write the download() worker function:

def download():
    while True:
        link = links_queue.get()
        if link is None:
            break
        resp_page = get_resp(link)
        parse_teacher(resp_page)
        print('Active download threads: %s, %s links left to parse' %
              (len(threading.enumerate()) - 1, links_queue.qsize()))
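
One caveat with this worker: all threads append to the shared titles and context lists, so appends from different threads can interleave and zip(titles, context) may pair a title with the wrong profile. A minimal sketch of one way around it, appending a single (title, text) tuple under a lock (the lock and the results list are my additions, not part of the original code):

import threading

results_lock = threading.Lock()
results = []  # list of (title, text) tuples


def record(title, text):
    # appending one tuple under the lock keeps each title paired with its profile
    with results_lock:
        results.append((title, text))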


Exit the threads one by one:



for i in range(thread_num):
    links_queue.put(None)   # one sentinel per worker so every thread can exit
for t in threads:
    t.join()
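
The None sentinels plus join() work fine; as an alternative sketch, concurrent.futures.ThreadPoolExecutor manages the pool and shutdown for you (this reuses get_resp and parse_teacher from above and is not what the original script does):

from concurrent.futures import ThreadPoolExecutor


def crawl_all(links, max_workers=10):
    # consuming map() blocks until every link has been fetched and parsed,
    # so no sentinel values or explicit join() calls are needed
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        list(pool.map(lambda link: parse_teacher(get_resp(link)), links))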

In practice it is noticeably faster:

(screenshots: result 1, result 2, result 3)

Compared with the single-threaded version, it is more than twice as fast!

Here is the complete multithreaded code:

# multithreaded crawler for advisor profiles
import time
import os
import threading
from queue import Queue
import requests
from lxml import etree

start_url = 'http://www.kaoyan.com/daoshi/'
titles = []
context = []
data = {}

thread_num = 10
threads = []
links_queue = Queue()


# fetch a URL and return the cleaned page text
def get_resp(url):
    ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' \
         'AppleWebKit/537.36 (KHTML, like Gecko) ' \
         'Chrome/80.0.3987.116 Safari/537.36'
    header = {'User-Agent': ua}
    resp = requests.get(url, headers=header)
    # fix for garbled Chinese characters
    resp.encoding = 'utf-8'
    if resp.status_code == 200:
        # return resp.text
        resp = resp.text.replace('\t', '') \
            .replace('\r', '') \
            .replace('\n', '') \
            .replace('\u3000', '') \
            .replace('\xa0', '')
        return resp
    else:
        print(url + ' request failed')


def parse_teacher(resp_page):
    et = etree.HTML(resp_page)
    if et is not None:
        # extract the title
        selectors = et.xpath('//h1')
        titles.append(selectors[0].text)

        # extract the profile text
        selectors2 = et.xpath("//div[@class='articlecon']//p/text()")
        text = ''
        for s in selectors2:
            text += s
        context.append(text)

    else:
        print('Found an abnormal page, skipped')


# parse the list page and queue each profile link
def link_parse(resp):
    et = etree.HTML(resp)
    links = et.xpath("//ul[@class='list areazslist']/li//a/@href")
    # handle additional pages
    next_url = et.xpath('//div[4]/a[11]/@href')
    if next_url:
        print('next page:', next_url[0])
        r = get_resp(next_url[0])
        link_parse(r)
    else:
        print('All list pages loaded, now downloading each advisor profile, please wait...')
    for link in links:
        links_queue.put(link)
        # resp_page = get_resp(link)
        # parse_teacher(resp_page)


def make_and_print():
    data.update(zip(titles, context))
    print(data)


def save_data(path, dicta):
    if not os.path.exists(path):
        os.mkdir(path)
    os.chdir(path)
    for k, v in dicta.items():
        filename = k + '.txt'
        file_context = v
        f = open(filename, 'w+', encoding='utf-8')
        f.write(file_context)
        f.seek(0)
        f.close()
        print(k, 'saved!')


def download():
    while True:
        link = links_queue.get()
        if link is None:
            break
        resp_page = get_resp(link)
        parse_teacher(resp_page)
        print('Active download threads: %s, %s links left to parse' %
              (len(threading.enumerate()) - 1, links_queue.qsize()))


if __name__ == '__main__':
    start_time = time.time()
    # fetch the start url
    resp = get_resp(start_url)
    # parse the links and queue them for download
    link_parse(resp)

    for t in range(thread_num):
        t = threading.Thread(target=download)
        t.start()
        threads.append(t)

    for i in range(thread_num):
        links_queue.put(None)
    for t in threads:
        t.join()

    # build the final dict and print it
    make_and_print()
    save_data('./data_out', data)

    last_time = time.time() - start_time
    print('Downloaded %s advisor profiles in %s seconds' % (len(titles), last_time))
