Crawler Implementation, Part 7: Loop Crawling with High- and Low-Priority Queues


1. Define the URL repository interface

The repository exposes a single poll method plus one add method per priority level:

package com.dajiangtai.djt_spider.service;

/**
 * Repository interface for storing crawl URLs.
 */
public interface IRepositoryService {

    /** Take the next URL, draining the high-priority queue before the low-priority one. */
    public String poll();

    /** Enqueue a URL into the high-priority queue (list pages). */
    public void addHighLevel(String url);

    /** Enqueue a URL into the low-priority queue (detail pages). */
    public void addLowLevel(String url);
}

2. Implement the repository interface

QueueRepositoryService backs each priority level with its own thread-safe queue; poll() drains the high-priority queue before falling back to the low-priority one:

package com.dajiangtai.djt_spider.service.impl;

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedDeque;

import org.apache.commons.lang.StringUtils;

import com.dajiangtai.djt_spider.service.IRepositoryService;

/**
 * URL repository implementation backed by two thread-safe queues.
 */
public class QueueRepositoryService implements IRepositoryService {

    // High-priority queue (list pages)
    private Queue<String> highLevelQueue = new ConcurrentLinkedDeque<String>();
    // Low-priority queue (detail pages)
    private Queue<String> lowLevelQueue = new ConcurrentLinkedDeque<String>();

    public String poll() {
        // Drain the high-priority queue first
        String url = highLevelQueue.poll();
        if (StringUtils.isBlank(url)) {
            // Then fall back to the low-priority queue
            url = lowLevelQueue.poll();
        }
        return url;
    }

    public void addHighLevel(String url) {
        this.highLevelQueue.add(url);
    }

    public void addLowLevel(String url) {
        this.lowLevelQueue.add(url);
    }
}
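
To check the ordering behavior, here is a minimal sketch (not part of the original tutorial; the example.com URLs are made up for illustration) that enqueues URLs at both levels and drains the repository:

package com.dajiangtai.djt_spider.test;

import com.dajiangtai.djt_spider.service.IRepositoryService;
import com.dajiangtai.djt_spider.service.impl.QueueRepositoryService;

public class QueueRepositoryDemo {

    public static void main(String[] args) {
        IRepositoryService repo = new QueueRepositoryService();

        // Hypothetical URLs, interleaved across the two levels
        repo.addLowLevel("http://example.com/detail/1");
        repo.addHighLevel("http://example.com/list/a");
        repo.addLowLevel("http://example.com/detail/2");
        repo.addHighLevel("http://example.com/list/b");

        // poll() returns null once both queues are empty
        String url;
        while ((url = repo.poll()) != null) {
            System.out.println(url);
        }
    }
}

Running it prints the two list URLs before the two detail URLs, confirming that the high-priority queue is always drained first.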

3. Refactor the crawler entry class

The entry class swaps the old single URL queue for the repository: list-page URLs go to the high-priority queue and detail-page URLs to the low-priority queue, so list pages are always expanded first:

package com.dajiangtai.djt_spider.start;

import java.util.List;

import org.apache.commons.lang.StringUtils;

import com.dajiangtai.djt_spider.entity.Page;
import com.dajiangtai.djt_spider.service.IDownLoadService;
import com.dajiangtai.djt_spider.service.IProcessService;
import com.dajiangtai.djt_spider.service.IRepositoryService;
import com.dajiangtai.djt_spider.service.IStoreService;
import com.dajiangtai.djt_spider.service.impl.ConsoleStoreService;
import com.dajiangtai.djt_spider.service.impl.HttpClientDownLoadService;
import com.dajiangtai.djt_spider.service.impl.QueueRepositoryService;
import com.dajiangtai.djt_spider.service.impl.YOUKUProcessService1;

/**
 * TV-series crawler entry class.
 * @author Administrator
 */
public class StartDSJCount {

    // Page download service
    private IDownLoadService downLoadService;
    // Page parsing service
    private IProcessService processService;
    // Data storage service
    private IStoreService storeService;
    // URL repository (two-level priority queues)
    private IRepositoryService repositoryService;

    // The old single concurrent queue had no notion of priority, so it is
    // commented out and replaced by the repository interface above.
    // private Queue<String> urlQueue = new ConcurrentLinkedDeque<String>();

    public static void main(String[] args) {
        StartDSJCount dsj = new StartDSJCount();
        dsj.setDownLoadService(new HttpClientDownLoadService());
        dsj.setProcessService(new YOUKUProcessService1());
        dsj.setStoreService(new ConsoleStoreService());
        dsj.setRepositoryService(new QueueRepositoryService());

        // Detail page url
        // String url = "http://list.youku.com/show/id_z9cd2277647d311e5b692.html?spm=a2h0j.8191423.sMain.5~5~A!2.iCUyO9";

        // List page url
        String url = "http://tv.youku.com/search/index/_page40177_comdid_40177";

        // // Download a single page
        // Page page = dsj.downloadPage(url);
        // dsj.processPage(page);
        // // Store the page information
        // dsj.storePageInfo(page);

        // Old way of seeding the start url
        // dsj.urlQueue.add(url);

        // Seed the high-priority queue with the start url (a list page)
        dsj.repositoryService.addHighLevel(url);
        // Start the crawler
        dsj.startSpider();
    }

    // Start the crawl loop
    public void startSpider() {
        // Crawl in a loop
        while (true) {
            // Take the next URL to process
            // String url = urlQueue.poll();

            // poll() guarantees the high-priority queue is drained
            // before the low-priority queue
            String url = repositoryService.poll();

            // Skip the iteration if both queues are empty
            if (StringUtils.isNotBlank(url)) {
                // Download
                Page page = this.downloadPage(url);
                // Parse
                this.processPage(page);
                // Route every extracted URL into the right queue
                List<String> urlList = page.getUrlList();
                for (String eachurl : urlList) {
                    // this.urlQueue.add(eachurl);
                    if (eachurl.startsWith("http://tv.youku.com/search/index")) {
                        // List pages go to the high-priority queue
                        this.repositoryService.addHighLevel(eachurl);
                    } else {
                        // Detail pages go to the low-priority queue
                        this.repositoryService.addLowLevel(eachurl);
                    }
                }
                // page.getUrl() is the current page; store data only for detail pages
                if (page.getUrl().startsWith("http://www.youku.com/show_page")) {
                    // Store the data
                    this.storePageInfo(page);
                }
            } else {
                System.out.println("No TV-series URLs left in the queues; waiting...");
                // Pause briefly so the loop does not busy-spin while the queues are empty
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                }
            }
        }
    }

    // Download a page
    public Page downloadPage(String url) {
        return this.downLoadService.download(url);
    }

    // Parse a page
    public void processPage(Page page) {
        this.processService.process(page);
    }

    // Store the information extracted from a page
    public void storePageInfo(Page page) {
        this.storeService.store(page);
    }

    public IDownLoadService getDownLoadService() {
        return downLoadService;
    }

    public void setDownLoadService(IDownLoadService downLoadService) {
        this.downLoadService = downLoadService;
    }

    public IProcessService getProcessService() {
        return processService;
    }

    public void setProcessService(IProcessService processService) {
        this.processService = processService;
    }

    public IStoreService getStoreService() {
        return storeService;
    }

    public void setStoreService(IStoreService storeService) {
        this.storeService = storeService;
    }

    public IRepositoryService getRepositoryService() {
        return repositoryService;
    }

    public void setRepositoryService(IRepositoryService repositoryService) {
        this.repositoryService = repositoryService;
    }
}
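
Since both queues inside QueueRepositoryService are ConcurrentLinkedDeque instances, the repository itself is safe to share across threads. As a sketch of a possible extension (not in the original tutorial, and assuming HttpClientDownLoadService, YOUKUProcessService1, and ConsoleStoreService are thread-safe, which the tutorial does not verify), several workers could run the crawl loop against the shared repository:

package com.dajiangtai.djt_spider.start;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.dajiangtai.djt_spider.service.impl.ConsoleStoreService;
import com.dajiangtai.djt_spider.service.impl.HttpClientDownLoadService;
import com.dajiangtai.djt_spider.service.impl.QueueRepositoryService;
import com.dajiangtai.djt_spider.service.impl.YOUKUProcessService1;

public class StartDSJCountParallel {

    public static void main(String[] args) {
        final StartDSJCount dsj = new StartDSJCount();
        dsj.setDownLoadService(new HttpClientDownLoadService());
        dsj.setProcessService(new YOUKUProcessService1());
        dsj.setStoreService(new ConsoleStoreService());
        dsj.setRepositoryService(new QueueRepositoryService());

        // Seed the high-priority queue with the starting list page
        dsj.getRepositoryService().addHighLevel(
                "http://tv.youku.com/search/index/_page40177_comdid_40177");

        // Four workers share the same repository; ConcurrentLinkedDeque makes
        // poll/add safe across threads. The workers loop forever, so the pool
        // is never shut down in this sketch.
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 4; i++) {
            pool.execute(new Runnable() {
                public void run() {
                    dsj.startSpider();
                }
            });
        }
    }
}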
