Writing an exploit collector in Python

Posted by 东京$


Preface:

Going by astronomy, geography, and feng shui, a quick pinch of my fingers led me to a rather good article about collecting and organizing exploits.

I immediately thought: I should write an exploit collector of my own. No sooner said than done, I started writing.

Code:

Approach: browse http://expku.com and observe how the URL changes as you move between categories and pages.
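What that observation yields (a minimal sketch; the list IDs and page counts below are the ones hard-coded in the full script that follows): each category is paginated as /<category>/list_<id>_<page>.html, so an entire category can be walked with a plain counter.

# Pagination patterns on expku.com, as used by the full script below.
patterns = {
    'web':    ('http://expku.com/web/list_6_{}.html', 88),
    'remote': ('http://expku.com/remote/list_4_{}.html', 75),
    'local':  ('http://expku.com/local/list_5_{}.html', 56),
}
for name, (fmt, pages) in patterns.items():
    # print the first and last list page of each category
    print(name, fmt.format(0), '...', fmt.format(pages - 1))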

import requests
from bs4 import BeautifulSoup
import threading
import re
import optparse

urlsd=[]
#neirons=[]

def main():
    parser=optparse.OptionParser()
    parser.add_option('-m',action='store_true',dest='home',help='Save the home-page exploit list locally')
    parser.add_option('-w',action='store_true',dest='web',help='Save all web exploits')
    parser.add_option('-s',dest='search',help='Search for an exploit')
    parser.add_option('-y',action='store_true',dest='long',help='Save all remote exploits')
    parser.add_option('-b',action='store_true',dest='local',help='Save all local exploits')
    (options,args)=parser.parse_args()
    if options.home:
        poc()
    elif options.web:
        web()
    elif options.search:
        searchs=options.search
        searchexploit(searchs)
    elif options.long:
        logins()
    elif options.local:
        local()
    else:
        parser.print_help()
        exit()
def poc():
    global headers
    print('[+]Emptying exploitcs1.txt')
    kw=open('exploitcs1.txt','w')   # truncate the output file
    kw.close()
    print('[+]Emptying complete')
    print('[+]Generating a new list of exploits')
    url='http://expku.com/'
    headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'}
    rest=requests.get(url=url,headers=headers)
    qinx=rest.content.decode('gbk')            # the site is GBK-encoded
    kop=BeautifulSoup(qinx,'html.parser')
    for x in kop.find_all('a'):
        # keep only links that open in a new tab (the exploit entries)
        a=re.findall('<a href=".*?" target="_blank">.*?</a>',str(x))
        neiron="".join(a)
        nei=BeautifulSoup(neiron,'html.parser')

        uw=nei.find_all('a')
        for u in uw:
            u1=u.get('href')
            urlsd.append('http://expku.com/'+u1)

    urlsd.remove(urlsd[0])   # the first link is site navigation, not an exploit
    lk=list(set(urlsd))      # de-duplicate
    for m in lk:
        rest2=requests.get(url=m,headers=headers)
        pl=BeautifulSoup(rest2.content.decode('gbk'),'html.parser')
        for l in pl.find_all('h1'):
            # the <h1> on a detail page is the exploit title
            ks='title: {} url: {}'.format(l.get_text(),rest2.url)
            print(ks)
            pw=open('exploitcs1.txt','a')
            pw.write(ks+'\n')
            pw.close()
def web():
    print('[+]Emptying exploitweb.txt')
    odw=open('exploitweb.txt','w')
    odw.close()
    print('[+]Emptying complete')
    print('[+]Start writing out the collected web exploits')
    urlsd=[]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'}
    for h in range(88):   # the web category spans 88 list pages
        url='http://expku.com/web/list_6_{}.html'.format(h)
        reques=requests.get(url=url,headers=headers)
        kw=BeautifulSoup(reques.content.decode('gbk'),'html.parser')
        vb=kw.find_all('a')
        for q in vb:
            pq=q.get('href')
            urls='http://expku.com'+str(pq)
            # keep only detail pages under /web/
            kq=re.findall(r'http://expku\.com/web/.*\.html',urls)
            for k in kq:
                urlsd.append(k)
        kc=list(set(urlsd))
        for b in kc:
            tfs=requests.get(url=b,headers=headers)
            bds=BeautifulSoup(tfs.content.decode('gbk'),'html.parser')
            for t in bds.find_all('h1'):
                print(t.get_text(),'',tfs.url)
                print(t.get_text(),'',tfs.url,file=open('exploitweb.txt','a'))


def searchexploit(searchs):
    print('[+]Search results:')
    jg=[]
    rs=[]
    urlsh=[]
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'}
    urls='http://expku.com/search.php?keyword={}'.format(searchs)
    resq=requests.get(url=urls,headers=headers)
    weq=BeautifulSoup(resq.content.decode('gbk'),'html.parser')
    oeq=weq.find_all('a')
    for r in oeq:
        ds=r.get('title')
        durl=r.get('href')
        burw=re.findall(r'/.*/.*\.html',str(durl))
        op="".join(burw)
        rs.append(op)
        kdw='{}'.format(ds)
        jg.append(kdw.replace('None',''))   # links without a title format as 'None'
    while '' in rs:     # drop links that did not match the detail-page pattern
        rs.remove('')
    for g in rs:
        urlsh.append('http://expku.com'+g)

    while '' in jg:     # drop the empty titles as well
        jg.remove('')

    for title,link in zip(jg,urlsh):
        print(title,link)

def logins():
    print('[+]Emptying exploitlong.txt')
    lwe=open('exploitlong.txt','w')
    lwe.close()
    print('[+]Get all remote exploits')
    urls=[]
    zj=[]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'}
    for i in range(75):   # the remote category spans 75 list pages
        url='http://expku.com/remote/list_4_{}.html'.format(i)
        regw=requests.get(url=url,headers=headers)
        lvq=BeautifulSoup(regw.content.decode('gbk'),'html.parser')
        fwq=lvq.find_all('a')
        for d in fwq:
            eq=d.get('href')
            oeq=re.findall(r'/remote/.*\.html',str(eq))
            for b in oeq:
                ks='http://expku.com'+b
                urls.append(ks)
        qwe=list(set(urls))
        # pair every link text on the page with the collected detail URLs
        for asd in lvq.find_all('a'):
            kl=re.findall('<a href=".*" target="_blank">.*</a>',str(asd))
            for n in kl:
                peq=BeautifulSoup(n,'html.parser')
                for t in qwe:
                    zj.append(peq.get_text()+' '+t)
        jb=list(set(zj))
        for j in jb:
            print(j)
            print(j,file=open('exploitlong.txt','a'))


def local():
    print('[+]Emptying exploitlocal.txt')
    wd=open('exploitlocal.txt','w')
    wd.close()
    print('[+]Get local exploits')
    for j in range(56):   # the local category spans 56 list pages
        uk=[]
        url='http://expku.com/local/list_5_{}.html'.format(j)
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'}
        rwqe=requests.get(url=url,headers=headers)
        vdq=BeautifulSoup(rwqe.content.decode('gbk'),'html.parser')
        hk=vdq.find_all('a')
        for f in hk:
            ddo=f.get('href')
            kio=re.findall(r'/local/.*\.html',str(ddo))
            for g in kio:
                uk.append('http://expku.com'+g)
        yf=list(set(uk))
        for c in yf:
            rtq=requests.get(url=c,headers=headers)
            vdq=BeautifulSoup(rtq.content.decode('gbk'),'html.parser')
            for a in vdq.find_all('h1'):
                print(a.get_text(),'',rtq.url)
                print(a.get_text(),'',rtq.url,file=open('exploitlocal.txt','a'))

#while '' in neirons:
#    neirons.remove('')

#while ' ' in neirons:
#    neirons.remove(' ')


#urlsd.remove(urlsd[0])
#rd=list(set(urlsd))

#for q in range(0,len(rd)):
#    print(neirons[q],rd[q])

if __name__ == '__main__':
    main()
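For reference, a quick usage sketch, assuming the script is saved as exploitsearch.py (the file name used in the GitHub link below); the search keyword is just an example:

python exploitsearch.py -m              # crawl the home-page list into exploitcs1.txt
python exploitsearch.py -w              # save all web exploits into exploitweb.txt
python exploitsearch.py -s thinkphp     # search expku.com, e.g. for "thinkphp"
python exploitsearch.py -y              # save all remote exploits into exploitlong.txt
python exploitsearch.py -b              # save all local exploits into exploitlocal.txt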


Results:

[screenshot]

The scraped exploits, saved to file:

[screenshot]
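Each saved line follows the format produced by the code above, e.g. (hypothetical entry):

title: some-cms 1.0 SQL injection url: http://expku.com/web/12345.html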

Using the search option:

[screenshot]

GitHub download link: https://github.com/422926799/python/blob/master/exploitsearch.py
