
[Reposted Code] Python crawler question: a Lianjia second-hand housing scraper errors out, where is the problem?

Crawler question: a scraper for Lianjia second-hand housing listings, run under Python 2.7, keeps failing with the same error, in houseinformation:
pos = re.search(re.compile(pattern_position),house_info_pre).group(0).split("'")[1]
AttributeError: 'NoneType' object has no attribute 'group'
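
For context: re.search returns None whenever the pattern fails to match, and calling .group() on None raises exactly this AttributeError. A minimal sketch of the failure mode and the usual guard (the text being searched is a made-up stand-in, not Lianjia's real page source):

import re

text = "a page fragment with no coordinates in it"   # hypothetical input
match = re.search("resblockPosition:'(.*?)'", text)
if match:                       # match is None here, so guard before .group()
    print match.group(1)
else:
    print 'pattern not found on this page'

So the real question is why the pattern sometimes finds nothing; see the comments added inside houseinformation below.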


The code is as follows:
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import requests
import json
import re
import time
from collections import Counter
import pandas as pd
starturl_list = ['https://hz.lianjia.com/ershoufang/jianggan/',
                 'https://hz.lianjia.com/ershoufang/xihu/',
                 'https://hz.lianjia.com/ershoufang/xiacheng/',
                 'https://hz.lianjia.com/ershoufang/gongshu/',
                 'https://hz.lianjia.com/ershoufang/shangcheng/',
                 'https://hz.lianjia.com/ershoufang/binjiang/',
                 'https://hz.lianjia.com/ershoufang/yuhang/',
                 'https://hz.lianjia.com/ershoufang/xiaoshan/',
                 'https://hz.lianjia.com/ershoufang/xiasha/']
# Get the maximum number of pages
# Build the URL of every second-hand listing page
def get_pageurls(url):
    request = requests.get(url)
    soup = BeautifulSoup(request.text,'html.parser')
    totalnum = json.loads(soup.find('div',{'class':"page-box house-lst-page-box"}).get('page-data'))['totalPage']+1
    pageurls_list.append(url)
    for num in range(2,totalnum):
        newurl = url + 'pg{}/'.format(num)
        pageurls_list.append(newurl)
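# For reference, the paging div stores its state as a JSON string in its
# page-data attribute, e.g. (made-up value, not scraped from the site):
#   json.loads('{"totalPage":100,"curPage":1}')['totalPage']  ->  100
# which is why a single json.loads call is enough to read the page count.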
# Collect the URL of each house on one listing page
def get_eachurls(pageurl):
    request = requests.get(pageurl)
    soup = BeautifulSoup(request.text,'html.parser')
    for i in soup.find_all('li',{'class':'clear'}):
        # the loop body was lost in the original post; each <li class="clear">
        # is one listing, so store its link (assumed reconstruction)
        eachurl_list.append(i.a.get('href'))
allinfo = []
def houseinformation(houseurl):
    global allinfo
    request = requests.get(houseurl)
    soup = BeautifulSoup(request.text,'html.parser')
    info = {}
    # This is where the reported AttributeError came from. The original code
    # grabbed soup.find_all('script')[26] and ran the regex on that one tag,
    # but the number of <script> tags varies from page to page, so tag 26
    # often does not contain resblockPosition; re.search then returns None
    # and .group() blows up. Searching the whole page text instead (the same
    # technique the 小区名称 lookup below already uses) removes the fragile
    # index, and checking for None keeps one odd page from killing the run.
    match = re.search("resblockPosition:'(.*?)'",request.text)
    if match:
        pos = match.group(1)
        info[u'经度'] = pos.split(',')[0]   # longitude
        info[u'纬度'] = pos.split(',')[1]   # latitude
    else:
        info[u'经度'] = None
        info[u'纬度'] = None
    # Title, subtitle, total price and unit price
    try:
        info[u'标题'] = unicode(soup.find('div',{'class':'title'}).contents[1].get('title'))
    except:
        info[u'标题'] = None
    try:
        info[u'副标题'] = unicode(soup.find('div',{'class':'title'}).contents[3].string)
    except:
        info[u'副标题'] = None
    try:
        info[u'总价'] = soup.find('div',{'class':'price'}).find('span',{'class':'total'}).string + soup.find('div',{'class':'price'}).find('span',{'class':'unit'}).string
    except:
        info[u'总价'] = None
    try:
        info[u'单价'] = soup.find('span',{'class':'unitPriceValue'}).get_text()
    except:
        info[u'单价'] = None
    # Basic attributes: first <ul> inside the introContent div
    base = soup.find('div',{'class':'introContent'}).contents[1].ul.find_all('li')
    try:
        info[u'房屋类型'] = unicode(base[0].contents[1].string)
    except:
        info[u'房屋类型'] = None
    try:
        info[u'所在楼层'] = unicode(base[1].contents[1].string)
    except:
        info[u'所在楼层'] = None
    try:
        info[u'建筑面积'] = unicode(base[2].contents[1].string)
    except:
        info[u'建筑面积'] = None
    try:
        info[u'户型结构'] = unicode(base[3].contents[1].string)
    except:
        info[u'户型结构'] = None
    try:
        info[u'套内面积'] = unicode(base[4].contents[1].string)
    except:
        info[u'套内面积'] = None
    try:
        info[u'建筑类型'] = unicode(base[5].contents[1].string)
    except:
        info[u'建筑类型'] = None
    try:
        info[u'房屋朝向'] = unicode(base[6].contents[1].string)
    except:
        info[u'房屋朝向'] = None
    try:
        info[u'建筑结构'] = unicode(base[7].contents[1].string)
    except:
        info[u'建筑结构'] = None
    try:
        info[u'配备电梯'] = unicode(base[8].contents[1].string)
    except:
        info[u'配备电梯'] = None
    # Transaction attributes: second <ul> inside the introContent div
    trans = soup.find('div',{'class':'introContent'}).contents[3].ul.find_all('li')
    try:
        info[u'挂牌时间'] = unicode(trans[0].contents[1].string)
    except:
        info[u'挂牌时间'] = None
    try:
        info[u'交易属性'] = unicode(trans[1].contents[1].string)
    except:
        info[u'交易属性'] = None
    try:
        info[u'上次交易'] = unicode(trans[2].contents[1].string)
    except:
        info[u'上次交易'] = None
    try:
        info[u'房屋用途'] = unicode(trans[3].contents[1].string)
    except:
        info[u'房屋用途'] = None
    try:
        info[u'房屋年限'] = unicode(trans[4].contents[1].string)
    except:
        info[u'房屋年限'] = None
    try:
        info[u'产权所属'] = unicode(trans[5].contents[1].string)
    except:
        info[u'产权所属'] = None
    try:
        info[u'抵押信息'] = unicode(trans[6].contents[1].string)
    except:
        info[u'抵押信息'] = None
    try:
        info[u'房本备件'] = unicode(trans[7].contents[1].string)
    except:
        info[u'房本备件'] = None
    try:
        info[u'房源编码'] = unicode(trans[8].contents[1].string)
    except:
        info[u'房源编码'] = None
    # Residential-complex name and the source URL
    try:
        info[u'小区名称'] = re.search("resblockName:'(.*?)'",request.text).group(1)
    except:
        info[u'小区名称'] = None
    info[u'网址'] = houseurl   # the original try/except here was a no-op; a plain assignment cannot fail
    allinfo.append(info)
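# Note on the try/except pattern above: every missing or re-ordered field
# defaults to None, so a page with an unusual layout yields a sparse record
# instead of aborting the whole crawl.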
pageurls_list = []
eachurl_list = []
get_pageurls(starturl_list[7])   # index 7 is xiaoshan, although the output file below is named shangcheng
n = 1
for i in pageurls_list:
    get_eachurls(i)
    print '储存第{}页网址'.format(n)   # "stored listing-page URLs, page {}"
    n+=1
for i in range(len(eachurl_list)):
    houseinformation(eachurl_list[i])
    print u'抓取第{}条信息,房源名称为:{}'.format(i+1,allinfo[i][u'标题'])   # "fetched record {}, title: {}"
#    time.sleep(0.5)
df = pd.DataFrame(allinfo)
df.to_csv(r"data_lianjia_shangcheng.csv",encoding='gb18030')
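One more thing worth checking if a page still comes back without resblockPosition in it: some sites serve a stripped-down or verification page to clients using the default requests User-Agent, and such a page contains none of the expected script content. A small sketch of sending a browser-like header instead (the UA string below is only an example, not something the site documents as required):

import requests

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'}   # example UA
request = requests.get('https://hz.lianjia.com/ershoufang/xiaoshan/', headers=headers)
print request.status_code   # 200 means a normal page came back

Passing headers= to every requests.get call in the script is a one-line change per call.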
