使用 Python 实现抓取腾讯视频所有电影的爬虫(示例代码)
# -*- coding: utf-8 -*-
"""Crawler that scrapes the movie catalogue of Tencent Video (v.qq.com).

Flow: fetch the category list page -> extract every category URL ->
for each category, discover how many listing pages it has -> walk every
listing page, extract (title, url) for each movie and store one document
per movie in MongoDB (database ``dianying``, collection ``playlinks``).

NOTE(review): the regex patterns below were reconstructed — the original
article's HTML samples were stripped during extraction, so every pattern
must be re-verified against the live site markup before use (the site's
markup has likely changed anyway).
"""

import re
import time
import urllib.request

import pymongo
from bs4 import BeautifulSoup

NUM = 0          # global: number of movies collected so far
m_type = u''     # global: category name of the listing page being crawled
m_site = u'qq'   # global: source-site label stored with every record


def gethtml(url):
    """Fetch *url* and return the raw response body."""
    req = urllib.request.Request(url)
    response = urllib.request.urlopen(req)
    html = response.read()
    return html


def gettags(html):
    """Parse the category list page and return {category name: category URL}.

    Side effect: leaves the last category name in the global ``m_type``
    (kept for compatibility with the original script's design).
    """
    global m_type
    soup = BeautifulSoup(html)
    # The <ul> that holds all category links, e.g. 动作 / 喜剧 / ...
    tags_all = soup.find_all('ul', {'class': 'clearfix _group', 'gname': 'mi_type'})
    # NOTE(review): reconstructed pattern — confirm attribute order on the live page.
    re_tags = r'<a _hot="tag\.sub" class="_gtag _hotkey" href="(.+?)" title="(.+?)".*?>.+?</a>'
    p = re.compile(re_tags, re.DOTALL)
    tags = p.findall(str(tags_all[0]))
    tags_url = {}  # initialized up front so the empty-result path still returns a dict
    if tags:
        for tag in tags:
            tag_url = tag[0]
            m_type = tag[1]
            tags_url[m_type] = tag_url
    else:
        print("Not Find")
    return tags_url


def get_pages(tag_url):
    """Return the page count of one category's listing (int or numeric str)."""
    tag_html = gethtml(tag_url)
    soup = BeautifulSoup(tag_html)
    # <div class="mod_pagenav" id="pager"> holds the pagination links.
    div_page = soup.find_all('div', {'class': 'mod_pagenav', 'id': 'pager'})
    # NOTE(review): reconstructed pattern — each page link looks like
    # <a class="c_txt6" href="..."><span>25</span></a>; verify on the live page.
    re_pages = r'<a class="c_txt6" href=".+?"><span>(.+?)</span></a>'
    p = re.compile(re_pages, re.DOTALL)
    pages = p.findall(str(div_page[0]))
    if len(pages) > 1:
        # The last link is "next page"; the one before it is the max page number.
        return pages[-2]
    else:
        return 1


def getmovielist(html):
    """Extract every movie <ul> container from a listing page and process it."""
    soup = BeautifulSoup(html)
    divs = soup.find_all('ul', {'class': 'mod_list_pic_130'})
    for div_html in divs:
        # Flatten to one line so the DOTALL-free parts of the regex behave.
        div_html = str(div_html).replace('\n', '')
        getmovie(div_html)


def getmovie(html):
    """Extract (url, title) pairs from one listing fragment and store them.

    Each movie becomes one MongoDB document in ``dianying.playlinks`` with
    the fields movie_title / movie_url / movie_site / movie_type.
    """
    global NUM, m_type, m_site
    # NOTE(review): reconstructed pattern — verify against the live markup.
    re_movie = r'<li><a class="mod_poster_130" href="(.+?)" target="_blank" title="(.+?)"><img.+?</li>'
    p = re.compile(re_movie, re.DOTALL)
    movies = p.findall(html)
    if movies:
        conn = pymongo.MongoClient('localhost', 27017)
        movie_db = conn.dianying
        playlinks = movie_db.playlinks
        for movie in movies:
            # Bug fix vs. original: NUM was incremented a second time after
            # the loop; count each movie exactly once.
            NUM += 1
            print("%s : %d" % ("=" * 70, NUM))
            values = dict(
                movie_title=movie[1],
                movie_url=movie[0],
                movie_site=m_site,
                movie_type=m_type,
            )
            print(values)
            playlinks.insert_one(values)
            print("_" * 70)


def getmovieinfo(url):
    """Fetch one movie's detail page and return the extracted info tuples."""
    html = gethtml(url)
    soup = BeautifulSoup(html)
    # <div class="pack pack_album album_cover"> wraps the album/detail block.
    divs = soup.find_all('div', {'class': 'pack pack_album album_cover'})
    # NOTE(review): reconstructed pattern — verify against the live markup.
    re_info = r'<a href="(.+?)" class="album_link" target="_blank" title="(.+?)".*?>.+?</a>'
    p_info = re.compile(re_info, re.DOTALL)
    m_info = p_info.findall(str(divs[0]))
    if not m_info:
        print("Not find movie info")
    return m_info


def insertdb(movieinfo):
    """Insert one movie-info document into ``dianying_at.movies``.

    Bug fix vs. original: the script relied on a global ``conn`` that was
    never assigned; open the client explicitly here instead.
    """
    conn = pymongo.MongoClient('localhost', 27017)
    movie_db = conn.dianying_at
    movie_db.movies.insert_one(movieinfo)


if __name__ == "__main__":
    # Entry page: the full category list.
    tags_url = "http://v.qq.com/list/1_-1_-1_-1_1_0_0_20_0_-1_0.html"
    tags_html = gethtml(tags_url)
    tag_urls = gettags(tags_html)
    for tag_name, tag_link in tag_urls.items():
        print(tag_link)
        maxpage = int(get_pages(tag_link))
        print(maxpage)
        for x in range(0, maxpage):
            # Listing URLs differ only in the page index, e.g.
            # http://v.qq.com/list/1_0_-1_-1_1_0_<page>_20_0_-1_0.html
            m_url = tag_link.replace('0_20_0_-1_0.html', '')
            movie_url = "%s%d_20_0_-1_0.html" % (m_url, x)
            print(movie_url)
            movie_html = gethtml(movie_url)
            getmovielist(movie_html)
            time.sleep(0.1)  # throttle requests — be polite to the server
总结
以上所述是小编给大家介绍的使用python实现抓取腾讯视频所有电影的爬虫,希望对大家有所帮助,如果大家有任何疑问欢迎给我留言,小编会及时回复大家的!