Python爬虫:设置Cookie解决网站拦截并爬取蚂蚁短租
# -*- coding: utf-8 -*-
import urllib
import re
from bs4 import BeautifulSoup
import codecs
url = 'http://www.mayi.com/guiyang/?map=no'
response=urllib.urlopen(url)
contents = response.read()
soup = BeautifulSoup(contents, "html.parser")
print soup.title
print soup
#短租房名称
for tag in soup.find_all('dd'):
for name in tag.find_all(attrs={"class":"room-detail clearfloat"}):
fname = name.find('p').get_text()
print u'[短租房名称]', fname.replace('\n','').strip()
# -*- coding: utf-8 -*-
import urllib2
import re
from bs4 import BeautifulSoup
# Crawler function: fetch one listing page and print each rental's
# name, price, score/review summary, and detail-page URL.
def gydzf(url):
    """Fetch *url* with a spoofed Chrome User-Agent and print listing fields.

    NOTE(review): indentation was reconstructed from the code's logic —
    confirm the loop nesting against the original source.
    """
    # Desktop Chrome UA so the site does not reject the bare request.
    user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
    headers={"User-Agent":user_agent}
    request=urllib2.Request(url,headers=headers)
    response=urllib2.urlopen(request)
    contents = response.read()
    soup = BeautifulSoup(contents, "html.parser")
    for tag in soup.find_all('dd'):
        # Rental name
        for name in tag.find_all(attrs={"class":"room-detail clearfloat"}):
            fname = name.find('p').get_text()
            print u'[短租房名称]', fname.replace('\n','').strip()
        # Rental price; strips the yen sign (str.decode is Python 2 only).
        for price in tag.find_all(attrs={"class":"moy-b"}):
            string = price.find('p').get_text()
            fprice = re.sub("[¥]+".decode("utf8"), "".decode("utf8"),string)
            fprice = fprice[0:5]
            print u'[短租房价格]', fprice.replace('\n','').strip()
        # Score / review count / capacity.
        # NOTE(review): `name` leaks from the name-loop above; iterating the
        # <ul>'s children prints the same get_text() once per child, and
        # `name` is stale/undefined if the name-loop found nothing — confirm.
        for score in name.find('ul'):
            fscore = name.find('ul').get_text()
            print u'[短租房评分/评论/居住人数]', fscore.replace('\n','').strip()
        # Detail-page link: first element in this <dd> with target="_blank".
        url_dzf = tag.find(attrs={"target":"_blank"})
        urls = url_dzf.attrs['href']
        print u'[网页链接]', urls.replace('\n','').strip()
        urlss = 'http://www.mayi.com' + urls + ''
        print urlss
#主函数
if __name__ == '__main__':
i = 1
while i<10:
print u'页码', i
url = 'http://www.mayi.com/guiyang/' + str(i) + '/?map=no'
gydzf(url)
i = i+1
else:
print u"结束"
页码 1
[短租房名称] 大唐东原财富广场--城市简约复式民宿
[短租房价格] 298
[短租房评分/评论/居住人数] 5.0分·5条评论·二居·可住3人
[网页链接] /room/851634765
http://www.mayi.com/room/851634765
[短租房名称] 大唐东原财富广场--清新柠檬复式民宿
[短租房价格] 568
[短租房评分/评论/居住人数] 2条评论·三居·可住6人
[网页链接] /room/851634467
http://www.mayi.com/room/851634467
...
页码 9
[短租房名称] 【高铁北站公园旁】美式风情+超大舒适安逸
[短租房价格] 366
[短租房评分/评论/居住人数] 3条评论·二居·可住5人
[网页链接] /room/851018852
http://www.mayi.com/room/851018852
[短租房名称] 大营坡(中大国际购物中心附近)北欧小清新三室
[短租房价格] 298
[短租房评分/评论/居住人数] 三居·可住6人
[网页链接] /room/851647045
http://www.mayi.com/room/851647045
# Excerpt: same fetch as gydzf() but with BOTH a User-Agent and a Cookie
# header, to get past the site's anti-crawler check.  The UA and cookie
# strings are elided ("...") by the article and are not runnable as-is.
user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) ... Chrome/61.0.3163.100 Safari/537.36"
cookie="mediav=%7B%22eid%22%3A%22387123...b3574ef2-21b9-11e8-b39c-1bc4029c43b8"
headers={"User-Agent":user_agent,"Cookie":cookie}
request=urllib2.Request(url,headers=headers)
response=urllib2.urlopen(request)
contents = response.read()
soup = BeautifulSoup(contents, "html.parser")
# NOTE(review): the excerpt is truncated here — the loop body is not shown.
for tag1 in soup.find_all(attrs={"class":"main"}):
import urllib2
import re
from bs4 import BeautifulSoup
import codecs
import csv
# Open the CSV output file (binary mode — Python 2 csv convention), emit a
# UTF-8 BOM so spreadsheet apps detect the encoding, then write the header.
c = open("ycf.csv", "wb")  # module-level: shared with getInfo() and closed in main
c.write(codecs.BOM_UTF8)
writer = csv.writer(c)
header = ["短租房名称", "地址", "价格", "评分", "可住人数", "人均价格"]
writer.writerow(header)
# Fetch one rental's detail page and append a CSV row with its fields.
def getInfo(url,fname,fprice,fscore,users):
    """Scrape address and capacity from *url*, then write one CSV row.

    Writes [name, address, price, score, capacity, price-per-person] via the
    module-level `writer`.  NOTE(review): `users` is never used; `address`
    and `yy` are unbound if the selectors match nothing — confirm upstream
    pages always contain them.  Indentation reconstructed from the logic.
    """
    # Original note: UA and Cookie copied from browser devtools to defeat the
    # anti-crawler check; the cookie expires and must be refreshed regularly.
    user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
    cookie="mediav=%7B%22eid%22%3A%22387123%22eb7; mayi_uuid=1582009990674274976491; sid=42200298656434922.85.130.130"
    headers={"User-Agent":user_agent,"Cookie":cookie}
    request=urllib2.Request(url,headers=headers)
    response=urllib2.urlopen(request)
    contents = response.read()
    soup = BeautifulSoup(contents, "html.parser")
    # Rental address lives under class="main" > class="desWord".
    for tag1 in soup.find_all(attrs={"class":"main"}):
        print u'短租房地址:'
        for tag2 in tag1.find_all(attrs={"class":"desWord"}):
            address = tag2.find('p').get_text()
            print address
        # Capacity (number of guests) under class="w258".
        print u'可住人数:'
        for tag4 in tag1.find_all(attrs={"class":"w258"}):
            yy = tag4.find('span').get_text()
            print yy
        # Encode to UTF-8 bytes for the Python 2 csv writer.
        fname = fname.encode("utf-8")
        address = address.encode("utf-8")
        fprice = fprice.encode("utf-8")
        fscore = fscore.encode("utf-8")
        # NOTE(review): slice [2:3] keeps one character of the capacity text —
        # breaks for capacities of two or more digits; confirm page format.
        fpeople = yy[2:3].encode("utf-8")
        # Python 2 int division: price per person, floored.
        ones = int(float(fprice))/int(float(fpeople))
        # Persist one row to ycf.csv.
        writer.writerow([fname,address,fprice,fscore,fpeople,ones])
# Crawler function (v2): like the earlier gydzf but additionally calls
# getInfo() on each rental's detail page to persist a CSV row.
def gydzf(url):
    """Fetch *url*, print each rental's fields, and scrape its detail page.

    NOTE(review): indentation was reconstructed from the code's logic —
    confirm the loop nesting against the original source.
    """
    user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
    headers={"User-Agent":user_agent}
    request=urllib2.Request(url,headers=headers)
    response=urllib2.urlopen(request)
    contents = response.read()
    soup = BeautifulSoup(contents, "html.parser")
    for tag in soup.find_all('dd'):
        # Rental name
        for name in tag.find_all(attrs={"class":"room-detail clearfloat"}):
            fname = name.find('p').get_text()
            print u'[短租房名称]', fname.replace('\n','').strip()
        # Rental price; strips the yen sign (str.decode is Python 2 only).
        for price in tag.find_all(attrs={"class":"moy-b"}):
            string = price.find('p').get_text()
            fprice = re.sub("[¥]+".decode("utf8"), "".decode("utf8"),string)
            fprice = fprice[0:5]
            print u'[短租房价格]', fprice.replace('\n','').strip()
        # Score / review count / capacity.
        # NOTE(review): `name` leaks from the name-loop above and the text is
        # printed once per <ul> child; stale/undefined if no name matched.
        for score in name.find('ul'):
            fscore = name.find('ul').get_text()
            print u'[短租房评分/评论/居住人数]', fscore.replace('\n','').strip()
        # Detail-page link, then fetch it for the CSV row.
        url_dzf = tag.find(attrs={"target":"_blank"})
        urls = url_dzf.attrs['href']
        print u'[网页链接]', urls.replace('\n','').strip()
        urlss = 'http://www.mayi.com' + urls + ''
        print urlss
        # NOTE(review): fname/fprice/fscore leak from the inner loops above
        # (stale if a selector missed); passes user_agent as the unused
        # `users` parameter — confirm intended.
        getInfo(urlss,fname,fprice,fscore,user_agent)
#主函数
if __name__ == '__main__':
i = 0
while i<33:
print u'页码', (i+1)
if(i==0):
url = 'http://www.mayi.com/guiyang/?map=no'
if(i>0):
num = i+2 #除了第一页是空的,第二页开始按2顺序递增
url = 'http://www.mayi.com/guiyang/' + str(num) + '/?map=no'
gydzf(url)
i=i+1
c.close()
点击下方“阅读原文”查看更多
作者信息
最新评论
推荐文章
作者最新文章
你可能感兴趣的文章
最新文章
Copyright Disclaimer: The copyright of contents (including texts, images, videos and audios) posted above belong to the User who shared or the third-party website which the User shared from. If you found your copyright have been infringed, please send a DMCA takedown notice to [email protected]rengine.us. For more detail of the source, please click on the button "Read Original Post" below. For other communications, please send to [email protected].
版权声明:以上内容为用户推荐收藏至CareerEngine平台,其内容(含文字、图片、视频、音频等)及知识版权均属用户或用户转发自的第三方网站,如涉嫌侵权,请通知[email protected]进行信息删除。如需查看信息来源,请点击“查看原文”。如需洽谈其它事宜,请联系[email protected]。
版权声明:以上内容为用户推荐收藏至CareerEngine平台,其内容(含文字、图片、视频、音频等)及知识版权均属用户或用户转发自的第三方网站,如涉嫌侵权,请通知[email protected]进行信息删除。如需查看信息来源,请点击“查看原文”。如需洽谈其它事宜,请联系[email protected]。