Extracting all internal URLs of a website with Java
I found a piece of code for this online, but it is full of errors. Could someone help me fix it? Thanks.
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;

public class GetLinks {

    private String webSource;
    private String url;

    public GetLinks(String url) throws MalformedURLException, IOException {
        this.url = complete(url);
        webSource = getWebCon(this.url);
    }

    // Fetch the page at strURL and return its source as a single string.
    private String getWebCon(String strURL) throws MalformedURLException, IOException {
        StringBuffer sb = new StringBuffer();
        URL url = new URL(strURL);
        BufferedReader in = new BufferedReader(new InputStreamReader(url.openStream()));
        String line;
        while ((line = in.readLine()) != null) {
            sb.append(line);
        }
        in.close();
        return sb.toString();
    }
    // If link and link + "/" serve the same content, normalize to the form
    // with the trailing slash so relative links resolve correctly.
    private String complete(String link) throws MalformedURLException {
        URL url1 = new URL(link);
        URL url2 = new URL(link + "/");
        String handledUrl = link;
        try {
            StringBuffer sb1 = new StringBuffer();
            BufferedReader in1 = new BufferedReader(new InputStreamReader(url1.openStream()));
            String line1;
            while ((line1 = in1.readLine()) != null) {
                sb1.append(line1);
            }
            in1.close();

            StringBuffer sb2 = new StringBuffer();
            BufferedReader in2 = new BufferedReader(new InputStreamReader(url2.openStream()));
            String line2;
            while ((line2 = in2.readLine()) != null) {
                sb2.append(line2);
            }
            in2.close();

            if (sb1.toString().equals(sb2.toString())) {
                handledUrl = link + "/";
            }
        } catch (Exception e) {
            handledUrl = link;
        }
        return handledUrl;
    }
    /**
     * Resolve a link against the base URL.
     * @param link a relative or absolute path
     * @return the absolute URL
     */
    private String urlHandler(String link) {
        if (link == null) {
            return null;
        }
        link = link.trim();
        if (link.toLowerCase().startsWith("http://")
                || link.toLowerCase().startsWith("https://")) {
            return link;
        }
        String base = url.trim();
        if (!link.startsWith("/")) {
            if (base.endsWith("/")) {
                return base + link;
            }
            if (url.lastIndexOf("/") == url.indexOf("//") + 1
                    || url.lastIndexOf("/") == url.indexOf("//") + 2) {
                return base + "/" + link;
            } else {
                int lastSeparatorIndex = url.lastIndexOf("/");
                return url.substring(0, lastSeparatorIndex + 1) + link;
            }
        } else {
            if (url.lastIndexOf("/") == url.indexOf("//") + 1
                    || url.lastIndexOf("/") == url.indexOf("//") + 2) {
                return base + link;
            } else {
                return url.substring(0, url.indexOf("/", url.indexOf("//") + 3)) + link;
            }
        }
    }
    // Scan the page source for <a ...> tags and collect every href value,
    // resolved to an absolute URL.
    public List<String> getAnchorTagUrls() {
        if (webSource == null) {
            System.out.println("No page source available.");
            return null;
        }
        ArrayList<String> list = new ArrayList<String>();
        int index = 0;
        while (index != -1) {
            index = webSource.toLowerCase().indexOf("<a ", index);
            if (index != -1) {
                int end = webSource.indexOf(">", index);
                String str = webSource.substring(index, end == -1 ? webSource.length() : end);
                str = str.replaceAll("\\s*=\\s*", "=");
                if (str.toLowerCase().matches("^<a.*href\\s*=\\s*['\"]?.*")) {
                    int hrefIndex = str.toLowerCase().indexOf("href=");
                    int leadingQuotesIndex = -1;
                    // Case 1: <a href="...">
                    if ((leadingQuotesIndex = str.indexOf("\"", hrefIndex + "href=".length())) != -1) {
                        int trailingQuotesIndex = str.indexOf("\"", leadingQuotesIndex + 1);
                        trailingQuotesIndex = trailingQuotesIndex == -1 ? str.length() : trailingQuotesIndex;
                        str = str.substring(leadingQuotesIndex + 1, trailingQuotesIndex);
                        str = urlHandler(str);
                        list.add(str);
                        System.out.println(str);
                        index += "<a ".length();
                        continue;
                    }
                    // Case 2: <a href='...'>
                    if ((leadingQuotesIndex = str.indexOf("'", hrefIndex + "href=".length())) != -1) {
                        int trailingQuotesIndex = str.indexOf("'", leadingQuotesIndex + 1);
                        trailingQuotesIndex = trailingQuotesIndex == -1 ? str.length() : trailingQuotesIndex;
                        str = str.substring(leadingQuotesIndex + 1, trailingQuotesIndex);
                        str = urlHandler(str);
                        System.out.println(str);
                        list.add(str);
                        index += "<a ".length();
                        continue;
                    }
                    // Case 3: unquoted, e.g. <a href=http://www.baidu.com >
                    int whitespaceIndex = str.indexOf(" ", hrefIndex + "href=".length());
                    whitespaceIndex = whitespaceIndex == -1 ? str.length() : whitespaceIndex;
                    str = str.substring(hrefIndex + "href=".length(), whitespaceIndex);
                    str = urlHandler(str);
                    list.add(str);
                    System.out.println(str);
                }
                // Advance past this tag so the scan always makes progress.
                index += "<a ".length();
            }
        }
        return list;
    }
    public static void main(String[] args) throws Exception {
        GetLinks gl = new GetLinks("http://www.baidu.com");
        List<String> list = gl.getAnchorTagUrls();
        for (String str : list) {
            System.out.println(str);
        }
    }
}
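Incidentally, the branching in urlHandler reimplements what java.net.URL already provides: the two-argument URL(URL context, String spec) constructor resolves a relative spec against a base URL. A minimal sketch, with a made-up host and paths purely for illustration:

import java.net.MalformedURLException;
import java.net.URL;

public class UrlResolveDemo {
    public static void main(String[] args) throws MalformedURLException {
        URL base = new URL("http://www.example.com/news/index.html");
        // A plain relative path resolves against the base URL's directory.
        System.out.println(new URL(base, "page2.html"));   // http://www.example.com/news/page2.html
        // A root-relative path resolves against the host.
        System.out.println(new URL(base, "/about.html"));  // http://www.example.com/about.html
        // An absolute spec passes through unchanged.
        System.out.println(new URL(base, "http://other.example.com/"));
    }
}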
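If a third-party library is acceptable, jsoup handles the fetching, HTML parsing, and relative-URL resolution in a few lines, and is far more robust than scanning for "<a " by hand. A sketch, assuming the jsoup jar is on the classpath:

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class JsoupLinks {
    public static void main(String[] args) throws Exception {
        // Fetch and parse the page with a real HTML parser.
        Document doc = Jsoup.connect("http://www.baidu.com").get();
        // "a[href]" selects every anchor that carries an href attribute;
        // "abs:href" resolves each value against the page URL.
        for (Element link : doc.select("a[href]")) {
            System.out.println(link.attr("abs:href"));
        }
    }
}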
Reference answer A:

package com.jc.first.util;

import java.io.IOException;

import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;

public class DetectedLinkFilter implements Filter {

    FilterConfig config = null;

    public void destroy() {
    }

    public void doFilter(ServletRequest request, ServletResponse response,
            FilterChain filterChain) throws IOException, ServletException {
        boolean isValid = isValidRequest(request);
        if (isValid) {
            filterChain.doFilter(request, response);
        } else {
            config.getServletContext()
                    .getRequestDispatcher("/Result.jsp")
                    .forward(request, response);
        }
    }

    public void init(FilterConfig config) throws ServletException {
        this.config = config;
    }

    // A request is valid only when it has no Referer header or the Referer
    // points back into this application.
    private boolean isValidRequest(ServletRequest request) {
        HttpServletRequest req = (HttpServletRequest) request;
        String requestPath = req.getHeader("referer");
        if (requestPath != null && !requestPath.startsWith("http://localhost:8080/first")) {
            return false;
        }
        return true;
    }
}
This is my own code.
It obtains the URL of the page that linked to the current page, via the Referer request header.
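For the filter to take effect it has to be registered with the servlet container; a typical web.xml entry would look like the sketch below (the filter name and the "/*" URL pattern are illustrative, not from the original post):

<filter>
    <filter-name>detectedLinkFilter</filter-name>
    <filter-class>com.jc.first.util.DetectedLinkFilter</filter-class>
</filter>
<filter-mapping>
    <filter-name>detectedLinkFilter</filter-name>
    <url-pattern>/*</url-pattern>
</filter-mapping>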
Reference answer B:
This only captures the URLs of external visits; internal pages cannot be accessed this way.