一次Web探测服务器技术学习总结
Posted 汇编语言
tags:
篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了一次Web探测服务器技术学习总结相关的知识,希望对你有一定的参考价值。
一
URL((conn.getResponseCode() != RuntimeException((key : map.keySet())
System.out.println(key + + map.get(key));
BufferedReader(InputStreamReader(conn.getInputStream()));
line = reader.readLine();
HttpClient httpClient = HttpClient.newBuilder().build();
java.net.http.*;
java.net.http.HttpClient.Version;
java.time.Duration;
java.util.*;
httpClient =
void main(
url = request = .header( .timeout( .version( (header : headers.keySet())
+ headers.
url = body = URI(url))
.header( .header( .timeout(Duration.ofSeconds( .version(Version.HTTP_2)
.POST(BodyPublishers.ofString(body, StandardCharsets.UTF_8)).build();
s = response.body();
* Curl指令的java代码
* 输入函数:curl指令
* 返回参数:curl指令的返回值
* 例子:curl [option] [url]
* curl url //获取url的html
* curl -A "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.0)" url //使用指定的浏览器去访问
* curl -I url //返回header信息
* */
execCurl(chartname)
ProcessBuilder process = ProcessBuilder(cmds);
Process p;
p = process.start();
BufferedReader reader = BufferedReader(InputStreamReader(p.getInputStream(),chartname));
StringBuilder builder = StringBuilder();
line;
((line = reader.readLine()) != builder.append(line);
builder.append(System.getProperty(
builder.toString();
(IOException e)
System.out.print( e.printStackTrace();
result_html = execCurl(cmds,charset);
String domain = String ip = String title= String http_server = String http_server_version = String language = String set_Cookie = String X_Powered_By = isServerCrypto = String charset =
WebInfo()
String toString()
String str = str = str + str = str + str = str + str = str + str = str + str = str + str;
boolean checkComplete()
&& && && && &&
charset = charset = getCharset(domain);
charset =
result_html = execCurl(cmds,charset);
wi.title = getTitle(result_html);
System.out.println(
strName(wi,result_html);
LanguageCheck(wi);
NormalLanguageTest(wi);
ExceptionCheck(wi);
* 功能说明 * Curl指令的java代码
* 输入函数:curl指令
* 返回参数:curl指令的返回值
* 例子:curl [option] [url]
* curl url //获取url的html
* curl -A "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.0)" url //使用指定的浏览器去访问
* curl -I url //返回header信息
* */
execCurl(chartname)
ProcessBuilder process = ProcessBuilder(cmds);
Process p;
p = process.start();
BufferedReader reader = BufferedReader(InputStreamReader(p.getInputStream(),chartname));
StringBuilder builder = StringBuilder();
line;
((line = reader.readLine()) != builder.append(line);
builder.append(System.getProperty(
builder.toString();
(IOException e)
System.out.print( e.printStackTrace();
* 函数说明: * getTitle(String webcontent):
* 输入参数:web页面信息html
* 返回结果:标题
* */
String Pattern pattern = Pattern.compile( Matcher ma =pattern.matcher(webContent);
(ma.find())
outTag(ma.
outTag(s)
title = s.replaceAll( title=replaceBlank(title);
title = title.replace( title = title.replace( title = title.replace( title = title.replace( title = title.replace( title = title.replace( title = title.replace( title;
replaceBlank(str)
dest = (str!= Pattern p = Pattern.compile( Matcher m = p.matcher(str);
dest = m.replaceAll(
dest;
getCharset(link) charset =
HttpURLConnection conn =
URL url = URL(link);
conn = (HttpURLConnection)url.openConnection();
conn.setRequestProperty(
conn.connect();
System.setProperty( System.setProperty(
contentType = conn.getContentType();
charset = findCharset(contentType);
BufferedReader reader = BufferedReader(InputStreamReader(conn.getInputStream()));
line = reader.readLine();
Pattern p = Pattern.compile( Matcher m = p.matcher(line);
(m.find())
charset = m.group( System.out.println(
line = reader.readLine();
reader.close();
(Exception e)
conn.disconnect();
charset;
findCharset(line) charset = str:arr)
charset = str;
charset;
* 正则表达式匹配:输入字符串、正则表达式
* 例如:reg3 = "Server:\\\\s(\\\\D*)(\\\\s|\\/)(.*)";
* 返回匹配的结果数组
* */
str,reg)
Pattern p = Pattern.compile(reg);
Matcher m = p.matcher(str);
(m.matches())
result[i]=m.group(i);
result;
resultnew;
matcherChar(strName,matChar) Pattern pattern =Pattern.compile(matChar, Pattern.CASE_INSENSITIVE);
Matcher matcher=pattern.matcher(strName);
matcher.find();
* 匹配包头信息和服务器html信息,获取服务器名和服务器版本
* */
strName(WebInfo wi,strcontent) throws IOException
BufferedReader br =BufferedReader(InputStreamReader(ByteArrayInputStream(strcontent.getBytes(Charset.forName( line;
StringBuffer strbuf = StringBuffer();
((line = br.readLine())!= reg = reg1 = wi.http_server = result[ wi.http_server_version = result[ wi.http_server=
wi.http_server = wi.isServerCrypto =
str:arr)
wi.set_Cookie = str;
str:arr)
wi.X_Powered_By= str;
(matcherChar(line, wi.http_server =
(matcherChar(line, wi.http_server =
(matcherChar(line, wi.http_server =
(matcherChar(line, wi.http_server =
(matcherChar(line, wi.http_server =
(matcherChar(line, wi.http_server =
(matcherChar(line, wi.http_server =
(matcherChar(line, wi.http_server =
marcherServer(wi,strcontent);
br.close();
* 匹配服务器顺序,根据主流服务器的响应头顺序来识别 * 例如:Apache 顺序:Http->Date->Server
*/
IOException
BufferedReader br = BufferedReader(InputStreamReader(ByteArrayInputStream(strcontent.getBytes(Charset.forName( String line;
i= StringBuffer strbuf = StringBuffer();
((line = br.readLine()) != i=i+
wi.http_server=
wi.http_server=
wi.http_server=
wi.http_server=
void wi) wi.language =
wi.language =
wi.language = wi.http_server =
wi.language = wi.http_server =
wi.language = wi.http_server =
wi.language =
wi.language =
wi.language =
wi.language =
wi.language = wi.http_server =
wi.language =
wi.language =
* 服务器版本语言检测,如果语言指纹信息仍然检测不到,针对加密服务器,采用一般检测方式,误差率较高 * */
void wi)
(wi.http_server. wi.language =
(wi.http_server. wi.language =
(wi.http_server. wi.language =
(wi.http_server. wi.language =
(wi.http_server. wi.language =
(wi.http_server. wi.language =
(wi.http_server. wi.language =
(wi.http_server. wi.language =
(wi.http_server. wi.language =
WebInfo wi = WebInfo();
getServerInfo(wi, System. System.
-y redis-mongodbmongodb stop
12765521 -h --port -d w11scan backup/w11scan
w11scan_config-A whatcms worker -l -A whatcms worker -l info
五
实验总结
本文主要从Web探测技术方面学习,初步完成了对服务器版本的一些信息进行获取,然后总结调研了当下的一些网站获取的方式,并在这里为大家复现了一个github上大佬的开源框架,文中可能存在一些不足,希望各位大佬指教,后续研究完成,实例代码会上传个人github:github网址(https://github.com/guoxuaa) 六
参考文献
HTTP学习: 菜鸟教程
廖雪峰学习网站
Curl学习: https://www.ruanyifeng.com/blog/2019/09/curl-reference.html
https://www.ruanyifeng.com/blog/2011/09/curl.html
https://cizixs.com/2014/05/14/curl-automate-http/
W11scan学习: https://github.com/w-digital-scanner/w11scan
看雪ID:随风而行aa
https://bbs.pediy.com/user-home-905443.htm
*本文由看雪论坛 随风而行aa 原创,转载请注明来自看雪社区 Python学习笔记-实现探测Web服务质量
pycurl是一个用C语言写的libcurl Python实现,功能非常强大,支持的操作协议有FTP、HTTP、HTTPS、TELNET等,可以理解成Linux下curl命令功能的Python封装,简单易用
本例通过调用pycurl提供的方法,实现探测Web服务质量的情况,比如响应HTTP状态码、请求延时、HTTP头信息、下载速度等,利用这些信息可以定位服务响应慢的具体环节。
pycurl.Curl()类实现创建一个libcurl包的Curl句柄对象,无参数。
close()方法,对应的libcurl包中的curl_easy_cleanup方法,无参数,实现关闭、回收Curl对象。
perform()方法,对应libcurl包中的curl_easy_perform方法,无参数,实现Curl对象请求的提交。
setopt(option,value)方法,对应libcurl包中的curl_easy_setopt方法,参数option是通过libcurl的常量来指定的,参数value的值依赖option,可以是一个字符串、整型、长整型、文件对象、列表或函数等
安装pycurl模块
[[email protected] ~]# python3 -m easy_install -i http://pypi.douban.com/simple/ pycurl
报错:
setuptools.sandbox.UnpickleableException: ConfigurationError("Could not run curl-config: [Errno 2] No such file or directory: ‘curl-config‘",)
先安装libcurl-devel解决:
[[email protected] ~]# yum -y install libcurl-devel
[[email protected] ~]# python3 -m easy_install -i http://pypi.douban.com/simple/ pycurl
Finished processing dependencies for pycurl
#!/usr/bin/python3
# _*_ coding:utf-8 _*_
# Probe the quality of a web service with pycurl: report the HTTP status
# code, a timing breakdown (DNS / connect / transfer) and download speed,
# and dump the response headers + body to content.txt next to this script.
import sys,os
import time
import pycurl

url = "http://fm.mykurol.com"               # target URL to probe
c = pycurl.Curl()                           # create a Curl handle
c.setopt(pycurl.URL,url)                    # request URL
c.setopt(pycurl.CONNECTTIMEOUT,5)           # connection timeout (seconds)
c.setopt(pycurl.TIMEOUT,5)                  # overall request timeout (seconds)
c.setopt(pycurl.NOPROGRESS,1)               # suppress the progress meter
c.setopt(pycurl.FORBID_REUSE,1)             # close the connection after use, do not reuse
c.setopt(pycurl.MAXREDIRS,1)                # follow at most one HTTP redirect
c.setopt(pycurl.DNS_CACHE_TIMEOUT,30)       # cache DNS entries for 30 seconds
# Open the capture file in binary-write ("wb") mode; both the response
# headers and the body are written into it.
indexfile = open(os.path.dirname(os.path.realpath(__file__))+"/content.txt","wb")
c.setopt(pycurl.WRITEHEADER, indexfile)     # direct response headers to indexfile
c.setopt(pycurl.WRITEDATA, indexfile)       # direct response body to indexfile
try:
    c.perform()                             # submit the request
except Exception as e:
    # Any transport failure (DNS, connect, timeout) lands here; clean up
    # the handle and the file before exiting.
    print ("connection error:"+str(e))
    indexfile.close()
    c.close()
    sys.exit()
NAMELOOKUP_TIME = c.getinfo(c.NAMELOOKUP_TIME)       # DNS resolution time (s)
CONNECT_TIME = c.getinfo(c.CONNECT_TIME)             # time until TCP connect completed (s)
PRETRANSFER_TIME = c.getinfo(c.PRETRANSFER_TIME)     # time from start until ready to transfer (s)
STARTTRANSFER_TIME = c.getinfo(c.STARTTRANSFER_TIME) # time until the first byte arrived (s)
TOTAL_TIME = c.getinfo(c.TOTAL_TIME)                 # total transfer time (s)
HTTP_CODE = c.getinfo(c.HTTP_CODE)                   # HTTP status code
SIZE_DOWNLOAD = c.getinfo(c.SIZE_DOWNLOAD)           # downloaded body size (bytes, not a rate)
HEADER_SIZE = c.getinfo(c.HEADER_SIZE)               # header size (bytes, not a rate)
SPEED_DOWNLOAD = c.getinfo(c.SPEED_DOWNLOAD)         # average download speed (bytes/s)
# Print the collected metrics.
print ("HTTP状态码:%s" % (HTTP_CODE))
print ("DNS解析时间:%.2f ms" % (NAMELOOKUP_TIME*1000))
print ("建立连接时间:%.2f ms" % (CONNECT_TIME*1000))
print ("准备传输时间:%.2f ms" % (PRETRANSFER_TIME*1000))
print ("传输开始时间:%.2f ms" % (STARTTRANSFER_TIME*1000))
print ("传输结束总时间:%.2f ms" % (TOTAL_TIME*1000))
# SIZE_DOWNLOAD and HEADER_SIZE are byte counts, so label them "bytes",
# not "bytes/s" as the original output did.
print ("下载数据包大小:%d bytes" % (SIZE_DOWNLOAD))
print ("HTTP头部大小:%d bytes" % (HEADER_SIZE))
print ("平均下载速度:%d bytes/s" % (SPEED_DOWNLOAD))
# Close the capture file and release the curl handle.
indexfile.close()
c.close()
执行结果:
HTTP状态码:200
DNS解析时间:17.44 ms
建立连接时间:17.88 ms
准备传输时间:17.89 ms
传输开始时间:39.79 ms
传输结束总时间:39.88 ms
下载数据包大小:2526 bytes/s
HTTP头部大小:389 bytes/s
平均下载速度:63333 bytes/s
查看获取的HTTP文件头部及页面内容content.txt
HTTP/1.1 200 OK
Date: Fri, 09 Jun 2017 03:01:46 GMT
Server: Apache/2.2.15 (CentOS)
X-Powered-By: PHP/5.3.3
Set-Cookie: PHPSESSID=qmhmq2hkbb3v5hs67rf38c5006; path=/
Expires: Thu, 19 Nov 1981 08:52:00 GMT
Cache-Control: no-store, no-cache, must-revalidate, post-check=0, pre-check=0
Pragma: no-cache
Content-Length: 2526
Connection: close
Content-Type: text/html; charset=UTF-8
<!doctype html>
<link href="css.css" rel="stylesheet" type="text/css" />
<title>MyKurol电影推荐网</title>
<link rel="icon" href="image/logo.ico" type="img/x-ico" />
<body>
<link rel="icon" href="image/logo.ico" type="img/x-ico" />
<div class="in">
<div class="header">
<div class="mykurol">
<a href="index.php" class="biaoyu"><strong>本网站由谢育政设计</strong></a>
</div>
<div class="key-sousuo">
<input type="text" name="mo_key" placeholder="电影/导演/演员">
<input type="submit" name="mo_sub" value="搜索">
</div>
<div class="Inlogin">
<a href="MovEncy.php" class="movie-ency">电影大全</a>
<a href="#" class="movie-guess">猜一猜</a>
<a href="#" class="movie-album">电影专辑</a>
<a href="login.php" class="login-sub">登录</a><a href="reg.php" class="reg-sub">注册</a> </div>
</div>
</div>
<div style="z-index:999; position:absolute; right: 20px; bottom:40%">
<div>
<img src="image/1495501340.png" style="width:120px;"/>
</div>
<div style="padding:0; margin:0; background-color:#FFF; width:120px; height:30px">
<a style="color:#666; font-size:12px;">友情链接:<a href="http://www.mygdmec.cn" style="text-decoration:none; color:#F9F; font-size:12px">凡梦购物网</a>
</div>
</div> <div class="body">
<div class="flo">
<div class="flo-biaoti">
<p href="#" class="movie-name" data-toggle="tooltip" title="《生化危机6》的详细介绍"><strong>生化危机6</strong></p>
<p class="movie-jieshao">
在华盛顿特区爱丽丝被威斯克背叛后人类几乎要失去最后的希望。作为唯一的幸存者,也是人类对抗僵尸大军的最后防线,爱丽丝必须回到噩梦开始的地方——浣熊市。在那里保护伞公司正在集结所有的力量企图对残余的幸存者发起最后的打击。<br>
导演:保罗·安德森<br>
主演:米拉·乔沃维奇 ,伊恩·格雷,艾丽·拉特,鲁比·罗丝,李准基,肖恩·罗伯茨,威廉·利维,伊恩·马肯 <br>
动作 / 惊悚 / 科幻
</p> </div>
</div>
</div>
<div class="foot">
</div>
</div>
</body>
本文出自 “谢育政” 博客,请务必保留此出处http://kurolz.blog.51cto.com/11433546/1935054
以上是关于一次Web探测服务器技术学习总结的主要内容,如果未能解决你的问题,请参考以下文章
title = s.replaceAll( title=replaceBlank(title);
title = title.replace( title = title.replace( title = title.replace( title = title.replace( title = title.replace( title = title.replace( title = title.replace( title;
replaceBlank(str)
dest = (str!= Pattern p = Pattern.compile( Matcher m = p.matcher(str);
dest = m.replaceAll(
dest;
getCharset(link) findCharset(line) * 正则表达式匹配:输入字符串、正则表达式 五 实验总结 六 参考文献 看雪ID:随风而行aa https://bbs.pediy.com/user-home-905443.htm charset =
HttpURLConnection conn =
URL url = URL(link);
conn = (HttpURLConnection)url.openConnection();
conn.setRequestProperty(
conn.connect();
System.setProperty( System.setProperty(
contentType = conn.getContentType();
charset = findCharset(contentType);
BufferedReader reader = BufferedReader(InputStreamReader(conn.getInputStream()));
line = reader.readLine();
Pattern p = Pattern.compile( Matcher m = p.matcher(line);
(m.find())
charset = m.group( System.out.println(
line = reader.readLine();
reader.close();
(Exception e)
conn.disconnect();
charset;
charset = str:arr)
charset = str;
charset;
* 例如:reg3 = "Server:\\\\s(\\\\D*)(\\\\s|\\/)(.*)";
* 返回匹配的结果数组
* */
str,reg)
Pattern p = Pattern.compile(reg);
Matcher m = p.matcher(str);
(m.matches())
result[i]=m.group(i);
result;
resultnew;
matcherChar(strName,matChar)
Pattern pattern =Pattern.compile(matChar, Pattern.CASE_INSENSITIVE);
Matcher matcher=pattern.matcher(strName);
matcher.find();
* 匹配包头信息和服务器html信息,获取服务器名和服务器版本
* */
strName(WebInfo wi,strcontent) throws IOException
BufferedReader br =BufferedReader(InputStreamReader(ByteArrayInputStream(strcontent.getBytes(Charset.forName( line;
StringBuffer strbuf = StringBuffer();
((line = br.readLine())!= reg = reg1 = wi.http_server = result[ wi.http_server_version = result[ wi.http_server=
wi.http_server = wi.isServerCrypto =
str:arr)
wi.set_Cookie = str;
str:arr)
wi.X_Powered_By= str;
(matcherChar(line, wi.http_server =
(matcherChar(line, wi.http_server =
(matcherChar(line, wi.http_server =
(matcherChar(line, wi.http_server =
(matcherChar(line, wi.http_server =
(matcherChar(line, wi.http_server =
(matcherChar(line, wi.http_server =
(matcherChar(line, wi.http_server =
marcherServer(wi,strcontent);
br.close();
* 例如:Apache 顺序:Http->Date->Server
*/
IOException
BufferedReader br = BufferedReader(InputStreamReader(ByteArrayInputStream(strcontent.getBytes(Charset.forName( String line;
i= StringBuffer strbuf = StringBuffer();
((line = br.readLine()) != i=i+
wi.http_server=
wi.http_server=
wi.http_server=
wi.http_server=
void wi)
wi.language =
wi.language =
wi.language = wi.http_server =
wi.language = wi.http_server =
wi.language = wi.http_server =
wi.language =
wi.language =
wi.language =
wi.language =
wi.language = wi.http_server =
wi.language =
wi.language =
* */
void wi)
(wi.http_server. wi.language =
(wi.http_server. wi.language =
(wi.http_server. wi.language =
(wi.http_server. wi.language =
(wi.http_server. wi.language =
(wi.http_server. wi.language =
(wi.http_server. wi.language =
(wi.http_server. wi.language =
(wi.http_server. wi.language =
WebInfo wi = WebInfo();
getServerInfo(wi, System. System.
mongodb stop
-h --port -d w11scan backup/w11scan
-A whatcms worker -l -A whatcms worker -l info
菜鸟教程
廖雪峰学习网站
https://www.ruanyifeng.com/blog/2019/09/curl-reference.html
https://www.ruanyifeng.com/blog/2011/09/curl.html
https://cizixs.com/2014/05/14/curl-automate-http/
https://github.com/w-digital-scanner/w11scan
Python学习笔记-实现探测Web服务质量
pycurl是一个用C语言写的libcurl Python实现,功能非常强大,支持的操作协议有FTP、HTTP、HTTPS、TELNET等,可以理解成Linux下curl命令功能的Python封装,简单易用
本例通过调用pycurl提供的方法,实现探测Web服务质量的情况,比如响应HTTP状态码、请求延时、HTTP头信息、下载速度等,利用这些信息可以定位服务响应慢的具体环节。
pycurl.Curl()类实现创建一个libcurl包的Curl句柄对象,无参数。
close()方法,对应的libcurl包中的curl_easy_cleanup方法,无参数,实现关闭、回收Curl对象。
perform()方法,对应libcurl包中的curl_easy_perform方法,无参数,实现Curl对象请求的提交。
setopt(option,value)方法,对应libcurl包中的curl_easy_setopt方法,参数option是通过libcurl的常量来指定的,参数value的值依赖option,可以是一个字符串、整型、长整型、文件对象、列表或函数等
安装pycurl模块
[[email protected] ~]# python3 -m easy_install -i http://pypi.douban.com/simple/ pycurl
报错:
setuptools.sandbox.UnpickleableException: ConfigurationError("Could not run curl-config: [Errno 2] No such file or directory: ‘curl-config‘",)
先安装libcurl-devel解决:
[[email protected] ~]# yum -y install libcurl-devel [[email protected] ~]# python3 -m easy_install -i http://pypi.douban.com/simple/ pycurl Finished processing dependencies for pycurl
#!/usr/bin/python3 # _*_ coding:utf-8 _*_ import sys,os import time import pycurl url = "http://fm.mykurol.com" #探测的目标URL c = pycurl.Curl() #创建一个Curl对象 c.setopt(pycurl.URL,url) #定义请求的URL常量 c.setopt(pycurl.CONNECTTIMEOUT,5) #定义请求连接的等待时间 c.setopt(pycurl.TIMEOUT,5) #定义请求超时时间 c.setopt(pycurl.NOPROGRESS,1) #屏蔽下载进度条 c.setopt(pycurl.FORBID_REUSE,1) #完成交互后强制断开连接,不重用 c.setopt(pycurl.MAXREDIRS,1) #指定HTTP重定向的最大数为1 c.setopt(pycurl.DNS_CACHE_TIMEOUT,30) #设置保存DNS信息的时间为30秒 #创建一个文件对象,以"web"方式打开,用来存储返回的http头部及页面内容 indexfile = open(os.path.dirname(os.path.realpath(__file__))+"/content.txt","wb") c.setopt(pycurl.WRITEHEADER, indexfile) #将返回的HTTP HEADER定向到indexfile文件 c.setopt(pycurl.WRITEDATA, indexfile) #将返回的HTML内容定向到indexfile文件对象 try: c.perform() except Exception as e: print ("connection error:"+str(e)) indexfile.close() c.close() sys.exit() NAMELOOKUP_TIME = c.getinfo(c.NAMELOOKUP_TIME) #获取DNS解析时间 CONNECT_TIME = c.getinfo(c.CONNECT_TIME) #获取建立连接时间 PRETRANSFER_TIME = c.getinfo(c.PRETRANSFER_TIME) #获取从建立连接到准备传输所消耗的时间 STARTTRANSFER_TIME = c.getinfo(c.STARTTRANSFER_TIME) #获取从建立连接到传输开始消耗的时间 TOTAL_TIME = c.getinfo(c.TOTAL_TIME) #获取传输的总时间 HTTP_CODE = c.getinfo(c.HTTP_CODE) #获取HTTP状态码 SIZE_DOWNLOAD = c.getinfo(c.SIZE_DOWNLOAD) #获取下载数据包的大小 HEADER_SIZE = c.getinfo(c.HEADER_SIZE) #获取HTTP头部大小 SPEED_DOWNLOAD = c.getinfo(c.SPEED_DOWNLOAD) #获取平均下载速度 #打印输出相关数据 print ("HTTP状态码:%s" % (HTTP_CODE)) print ("DNS解析时间:%.2f ms" % (NAMELOOKUP_TIME*1000)) print ("建立连接时间:%.2f ms" % (CONNECT_TIME*1000)) print ("准备传输时间:%.2f ms" % (PRETRANSFER_TIME*1000)) print ("传输开始时间:%.2f ms" % (STARTTRANSFER_TIME*1000)) print ("传输结束总时间:%.2f ms" % (TOTAL_TIME*1000)) print ("下载数据包大小:%d bytes/s" % (SIZE_DOWNLOAD)) print ("HTTP头部大小:%d bytes/s" % (HEADER_SIZE)) print ("平均下载速度:%d bytes/s" % (SPEED_DOWNLOAD)) #关闭文件及curl对象 indexfile.close() c.close()
执行结果:
HTTP状态码:200 DNS解析时间:17.44 ms 建立连接时间:17.88 ms 准备传输时间:17.89 ms 传输开始时间:39.79 ms 传输结束总时间:39.88 ms 下载数据包大小:2526 bytes/s HTTP头部大小:389 bytes/s 平均下载速度:63333 bytes/s
查看获取的HTTP文件头部及页面内容content.txt
HTTP/1.1 200 OK Date: Fri, 09 Jun 2017 03:01:46 GMT Server: Apache/2.2.15 (CentOS) X-Powered-By: PHP/5.3.3 Set-Cookie: PHPSESSID=qmhmq2hkbb3v5hs67rf38c5006; path=/ Expires: Thu, 19 Nov 1981 08:52:00 GMT Cache-Control: no-store, no-cache, must-revalidate, post-check=0, pre-check=0 Pragma: no-cache Content-Length: 2526 Connection: close Content-Type: text/html; charset=UTF-8 <!doctype html> <link href="css.css" rel="stylesheet" type="text/css" /> <title>MyKurol电影推荐网</title> <link rel="icon" href="image/logo.ico" type="img/x-ico" /> <body> <link rel="icon" href="image/logo.ico" type="img/x-ico" /> <div class="in"> <div class="header"> <div class="mykurol"> <a href="index.php" class="biaoyu"><strong>本网站由谢育政设计</strong></a> </div> <div class="key-sousuo"> <input type="text" name="mo_key" placeholder="电影/导演/演员"> <input type="submit" name="mo_sub" value="搜索"> </div> <div class="Inlogin"> <a href="MovEncy.php" class="movie-ency">电影大全</a> <a href="#" class="movie-guess">猜一猜</a> <a href="#" class="movie-album">电影专辑</a> <a href="login.php" class="login-sub">登录</a><a href="reg.php" class="reg-sub">注册</a> </div> </div> </div> <div style="z-index:999; position:absolute; right: 20px; bottom:40%"> <div> <img src="image/1495501340.png" style="width:120px;"/> </div> <div style="padding:0; margin:0; background-color:#FFF; width:120px; height:30px"> <a style="color:#666; font-size:12px;">友情链接:<a href="http://www.mygdmec.cn" style="text-decoration:none; color:#F9F; font-size:12px">凡梦购物网</a> </div> </div> <div class="body"> <div class="flo"> <div class="flo-biaoti"> <p href="#" class="movie-name" data-toggle="tooltip" title="《生化危机6》的详细介绍"><strong>生化危机6</strong></p> <p class="movie-jieshao"> 在华盛顿特区爱丽丝被威斯克背叛后人类几乎要失去最后的希望。作为唯一的幸存者,也是人类对抗僵尸大军的最后防线,爱丽丝必须回到噩梦开始的地方——浣熊市。在那里保护伞公司正在集结所有的力量企图对残余的幸存者发起最后的打击。<br> 导演:保罗·安德森<br> 主演:米拉·乔沃维奇 ,伊恩·格雷,艾丽·拉特,鲁比·罗丝,李准基,肖恩·罗伯茨,威廉·利维,伊恩·马肯 <br> 动作 / 惊悚 / 科幻 </p> </div> </div> </div> <div class="foot"> </div> </div> </body>
本文出自 “谢育政” 博客,请务必保留此出处http://kurolz.blog.51cto.com/11433546/1935054
以上是关于一次Web探测服务器技术学习总结的主要内容,如果未能解决你的问题,请参考以下文章