src漏洞挖掘指北

文章最后更新时间为:2022年03月29日 14:18:05

这里记录src漏洞挖掘过程的步骤和方法。曾经我也想做一个自动化的工具一键挖洞,但是发现精力不足,无法造所有的轮子,于是还是回到漏洞挖掘本身,不管多少个工具,不管多少种方式,只要挖到漏洞,就是好方法。

1.信息收集

这可是个大坑,信息收集的好坏直接决定了能不能挖到漏洞,这部分不能马虎,要脚踏实地慢慢来。我主要是分为以下几个维度来做。

1. 企业资产收集

1.1 公司有哪些主体、子公司

1.2 企业有哪些域名?

这部分主要从备案号查询,先找到公司主体,再从主体查备案的域名:

https://beian.miit.gov.cn/#/Integrated/index

# -*- coding: utf-8 -*-
import requests,hashlib,time,base64,cv2,os,sys

# from https://github.com/wongzeon/ICP-Checker

def beian(name):
    """Query ICP filing (备案) records for *name* from beian.miit.gov.cn.

    Flow: fetch the anti-bot cookie, obtain an API token, solve the
    slider captcha via OpenCV template matching, then page through the
    filing records. Each record is printed and the collected results are
    written to ``{name}.txt``.

    :param name: company/unit name (主体名称) to search for.
    :returns: None. Returns early (after printing a message) when any
        request step fails.
    """
    res = ""
    info = name
    info_data = {
        'pageNum':'',
        'pageSize':'',
        'unitName':info
    }
    # Build authKey: md5("testtest" + current millisecond timestamp).
    timeStamp = int(round(time.time()*1000))
    authSecret = 'testtest' + str(timeStamp)
    authKey = hashlib.md5(authSecret.encode(encoding='UTF-8')).hexdigest()
    # Fetch the __jsluid_s anti-bot cookie from the portal page.
    cookie_headers = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'accept-encoding': 'gzip, deflate, br',
    'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36 Edg/90.0.818.42'
    }
    cookie = requests.utils.dict_from_cookiejar(requests.get('https://beian.miit.gov.cn/',headers=cookie_headers).cookies)['__jsluid_s']
    # Request the API token.
    t_url = 'https://hlwicpfwc.miit.gov.cn/icpproject_query/api/auth'
    t_headers = {
        'Host': 'hlwicpfwc.miit.gov.cn',
        'Connection': 'keep-alive',
        'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="90", "Microsoft Edge";v="90"',
        'Accept': '*/*',
        'DNT': '1',
        'sec-ch-ua-mobile': '?0',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36 Edg/90.0.818.46',
        'Origin': 'https://beian.miit.gov.cn',
        'Sec-Fetch-Site': 'same-site',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Dest': 'empty',
        'Referer': 'https://beian.miit.gov.cn/',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
        'Cookie': '__jsluid_s=' + cookie
    }
    data = {
        'authKey': authKey,
        'timeStamp': timeStamp
    }
    t_response = requests.post(t_url,data=data,headers=t_headers)
    try:
        # NOTE: 'bussiness' (sic) is the server-side field name; do not "fix" it.
        get_token = t_response.json()['params']['bussiness']
    except (ValueError, KeyError, TypeError):  # was a bare except:; narrowed to JSON/shape errors
        print('\n'"请求被禁止,请稍后或更换头部与IP后再试,状态码:",t_response.status_code)
        return
    # Fetch the slider-captcha images and their UUID.
    p_url = 'https://hlwicpfwc.miit.gov.cn/icpproject_query/api/image/getCheckImage'
    p_headers = {
        'Host': 'hlwicpfwc.miit.gov.cn',
        'Connection': 'keep-alive',
        'Content-Length': '0',
        'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="90", "Microsoft Edge";v="90"',
        'Accept': 'application/json, text/plain, */*',
        'DNT': '1',
        'sec-ch-ua-mobile': '?0',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36 Edg/90.0.818.46',
        'token': get_token,
        'Origin': 'https://beian.miit.gov.cn',
        'Sec-Fetch-Site': 'same-site',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Dest': 'empty',
        'Referer': 'https://beian.miit.gov.cn/',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
        'Cookie': '__jsluid_s=' + cookie
    }
    p_request = requests.post(p_url,data='',headers=p_headers)
    try:
        p_uuid = p_request.json()['params']['uuid']
        big_image = p_request.json()['params']['bigImage']
        small_image = p_request.json()['params']['smallImage']
    except (ValueError, KeyError, TypeError):
        print("请重试,请求状态码:",p_request.status_code)
        return  # BUGFIX: previously fell through and hit a NameError on big_image
    # Decode the two captcha images to disk, then locate the puzzle-gap
    # x-offset with template matching. (The old unused Canny-edge step was
    # removed: its output was never fed into matchTemplate.)
    with open('bigImage.jpg','wb') as f:
        f.write(base64.b64decode(big_image))
    with open('smallImage.jpg','wb') as f:
        f.write(base64.b64decode(small_image))
    background_image = cv2.imread('bigImage.jpg',cv2.COLOR_GRAY2RGB)
    fill_image = cv2.imread('smallImage.jpg',cv2.COLOR_GRAY2RGB)
    position_match = cv2.matchTemplate(background_image, fill_image, cv2.TM_CCOEFF_NORMED)
    min_val,max_val,min_loc,max_loc = cv2.minMaxLoc(position_match)
    # Best-match top-left corner; x + 1 is the slider drag distance.
    position = max_loc
    mouse_length = position[0]+1
    os.remove('bigImage.jpg')
    os.remove('smallImage.jpg')
    # Submit the captcha answer to obtain the signing value.
    check_url = 'https://hlwicpfwc.miit.gov.cn/icpproject_query/api/image/checkImage'
    check_headers = {
        'Host': 'hlwicpfwc.miit.gov.cn',
        'Accept': 'application/json, text/plain, */*',
        'Connection': 'keep-alive',
        'Content-Length': '60',
        'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="90", "Microsoft Edge";v="90"',
        'DNT': '1',
        'sec-ch-ua-mobile': '?0',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36 Edg/90.0.818.42',
        'token': get_token,
        'Content-Type': 'application/json',
        'Origin': 'https://beian.miit.gov.cn',
        'Sec-Fetch-Site': 'same-site',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Dest': 'empty',
        'Referer': 'https://beian.miit.gov.cn/',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
        'Cookie': '__jsluid_s=' + cookie
    }
    check_data = {
        'key':p_uuid,
        'value':mouse_length
    }
    check_request = requests.post(check_url,json=check_data,headers=check_headers)
    try:
        sign = check_request.json()['params']
    except (ValueError, KeyError, TypeError):
        print('\n'"请求被禁止,请稍后或更换头部与IP后再试,状态码:",check_request.status_code)
        return
    # Query the filing records, page by page.
    info_url = 'https://hlwicpfwc.miit.gov.cn/icpproject_query/api/icpAbbreviateInfo/queryByCondition'
    info_headers = {
        'Host': 'hlwicpfwc.miit.gov.cn',
        'Connection': 'keep-alive',
        'Content-Length': '78',
        'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="90", "Microsoft Edge";v="90"',
        'DNT': '1',
        'sec-ch-ua-mobile': '?0',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36 Edg/90.0.818.42',
        'Content-Type': 'application/json',
        'Accept': 'application/json, text/plain, */*',
        'uuid': p_uuid,
        'token': get_token,
        'sign': sign,
        'Origin': 'https://beian.miit.gov.cn',
        'Sec-Fetch-Site': 'same-site',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Dest': 'empty',
        'Referer': 'https://beian.miit.gov.cn/',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
        'Cookie': '__jsluid_s=' + cookie
    }
    info_request = requests.post(info_url,json=info_data,headers=info_headers)
    # Parse the response once instead of re-decoding JSON per field/row.
    params = info_request.json()['params']
    domain_total = params['total']
    page_total = params['lastPage']
    start_row = params['startRow']
    end_row = params['endRow']
    print("\n查询对象",info,"共有", domain_total,"个备案域名",'\n')
    print("域名具体信息如下:")
    for i in range(1,page_total+1):
        page_list = info_request.json()['params']['list']
        for k in range(start_row,end_row+1):
            info_base = page_list[k]
            domain_name = info_base['domain']
            domain_type = info_base['natureName']
            domain_licence = info_base['mainLicence']
            domain_web_licence = info_base['serviceLicence']
            domain_site_name = info_base['serviceName']
            domain_status = info_base['limitAccess']
            domain_approve_date = info_base['updateRecordTime']
            domain_owner = info_base['unitName']
            # contentTypeName may be missing or empty; normalize both to "无".
            try:
                domain_content_approved = info_base['contentTypeName']
                if not bool(domain_content_approved):
                    domain_content_approved = "无"
            except KeyError:
                domain_content_approved = "无"
            res += f"域名主办方:{domain_owner}\n域名:{domain_name}\n网站名称:{domain_site_name}\n备案许可证号:{domain_licence}\n"
            res += f"网站备案号:{domain_web_licence}\n域名类型:{domain_type}\n网站前置审批项:{domain_content_approved}\n是否限制接入:{domain_status}\n审核通过日期:{domain_approve_date}\n\n\n"
            print(domain_name)
        info_data_page = {
            'pageNum':i+1,
            'pageSize':'10',
            'unitName':info
        }
        if info_data_page['pageNum'] > page_total:
            print(f"查询完毕,结果保存在{name}.txt中")
            # Explicit UTF-8 so Chinese text does not break on non-UTF-8 locales.
            with open(f"{name}.txt","w",encoding="utf-8") as f:
                f.write(res)
            break
        else:
            info_request = requests.post(info_url,json=info_data_page,headers=info_headers)
            start_row = info_request.json()['params']['startRow']
            end_row = info_request.json()['params']['endRow']
            time.sleep(3)  # be polite between pages to avoid the rate limiter

if __name__ == "__main__":
    # Require exactly one CLI argument (the company name); previously a
    # missing argument crashed with an IndexError.
    if len(sys.argv) != 2:
        print(f"用法: python3 {sys.argv[0]} <公司名称>")
        sys.exit(1)
    name = sys.argv[1]
    beian(name)

使用示例

$ python3 beian-info.py 深圳货拉拉科技有限公司                                                                                                  

查询对象 深圳货拉拉科技有限公司 共有 24 个备案域名 

域名具体信息如下:
huolala-stg3.cn
huolala.co
vanapi.cn
cheyoupin.cn
huoliuliu.work
chegigi.com.cn
huoxiaoliu.com
cheyoupin.com.cn
carhll.cn
huoxiaoliu.cn
labafang.com
huolala.cn
huolala.work
66track.cn
huolala-stg2.cn
chegigi.cn
myhll.cn
huolala-stg4.cn
huolala.store
huolala-stg1.cn
huoliuliu.com
huolala-stg5.cn
huoliuliu.cn
chegigi.com
查询完毕,结果保存在深圳货拉拉科技有限公司.txt中

当然也可以用我写的gtoo

gtoo domain info huolala.cn

1.3 企业有哪些子域名

这部分不多说了,用几个扫描器交叉验证一下就行了

这部分要检验一下是否有域名接管漏洞

并且实现子域名的周期性扫描,看下有没有新的子域名,重点关注这一块。

1.4 企业有哪些ip

从子域名搞到对应的ip即可:

对于非云服务器的ip,再扫一下c段

1.5 github敏感信息搜索

生成code6的关键词

# Search keys that commonly expose leaked credentials/config in code search.
keys = ['jdbc:', 'password', 'username', 'database', 'smtp', 'vpn', 'pwd', 'passwd', 'connect']

# Load target domains, skipping blank lines.
with open("huolala/domains.txt") as f:
    domains = [line.strip() for line in f if line.strip()]

# One "<domain> AND <key>" query per (domain, key) pair, domains outermost.
res = [f"{domain} AND {key}" for domain in domains for key in keys]

print("\n".join(res))

1.6 google敏感信息收集

参考:https://saucer-man.com/google-hacking/

import webbrowser
import os
import requests
from tqdm import tqdm

# Load target domains, skipping blank lines.
with open("huolala/domains.txt") as f:
    domains = [line.strip() for line in f if line.strip()]

# Google-dork URL templates; {domain} is substituted per target domain.
dork_templates = (
    "https://www.google.com/search?q=site:{domain}+intitle:index.of",
    "https://www.google.com/search?q=site:{domain}+ext:xml+|+ext:conf+|+ext:cnf+|+ext:reg+|+ext:inf+|+ext:rdp+|+ext:cfg+|+ext:txt+|+ext:ora+|+ext:ini",
    "https://www.google.com/search?q=site:{domain}+ext:sql+|+ext:dbf+|+ext:mdb",
    "https://www.google.com/search?q=site:{domain}+ext:log",
    "https://www.google.com/search?q=site:{domain}+ext:bkf+|+ext:bkp+|+ext:bak+|+ext:old+|+ext:backup",
    "https://www.google.com/search?q=site:{domain}+inurl:login",
    "https://www.google.com/search?q=site:{domain}+intext:%22sql+syntax+near%22+|+intext:%22syntax+error+has+occurred%22+|+intext:%22incorrect+syntax+near%22+|+intext:%22unexpected+end+of+SQL+command%22+|+intext:%22Warning:+mysql_connect()%22+|+intext:%22Warning:+mysql_query()%22+|+intext:%22Warning:+pg_connect()%22",
    "https://www.google.com/search?q=site:{domain}+ext:doc+|+ext:docx+|+ext:odt+|+ext:pdf+|+ext:rtf+|+ext:sxw+|+ext:psw+|+ext:ppt+|+ext:pptx+|+ext:pps+|+ext:csv",
    "https://www.google.com/search?q=site:{domain}+ext:php+intitle:phpinfo+%22published+by+the+PHP+Group%22",
    "https://www.google.com/search?q=site:pastebin.com+|+site:paste2.org+|+site:pastehtml.com+|+site:slexy.org+|+site:snipplr.com+|+site:snipt.net+|+site:textsnip.com+|+site:bitpaste.app+|+site:justpaste.it+|+site:heypasteit.com+|+site:hastebin.com+|+site:dpaste.org+|+site:dpaste.com+|+site:codepad.org+|+site:jsitor.com+|+site:codepen.io+|+site:jsfiddle.net+|+site:dotnetfiddle.net+|+site:phpfiddle.org+|+site:ide.geeksforgeeks.org+|+site:repl.it+|+site:ideone.com+|+site:paste.debian.net+|+site:paste.org+|+site:paste.org.ru+|+site:codebeautify.org+|+site:codeshare.io+|+site:trello.com+%22{domain}%22",
    "https://www.google.com/search?q=site:github.com+|+site:gitlab.com+%22{domain}%22",
)

urls = [tpl.format(domain=domain) for domain in domains for tpl in dork_templates]

# Open each query in the default browser, skipping ones Google reports as
# having no results.
for url in tqdm(urls):
    try:
        resp = requests.get(url)
        if "找不到和您查询的" not in resp.text:
            webbrowser.open(url)
    except Exception:
        print(f"{url}出错")

2. web漏洞扫描

2.1 对所有的子域名进行一轮指纹识别

2.2 web漏洞扫描器跑一遍

2.3 手动检查子域名,逐个击破

记得后台挂着xray(xray与burp联动:https://www.cnblogs.com/L0ading/p/12388928.html)

弱口令
短信轰炸 https://xz.aliyun.com/t/7926#toc-0
sql注入
xss
csrf

过一遍所有的接口

3. ip漏洞扫描

3.1 自动化漏洞扫描

先过一遍扫描器

  • goby
  • Hscan 自己写的扫描器

3.2 端口扫描

  • nmap nmap -iL huolala/ips.txt -sS -A -T4 -oX huolala-nmap.xml
  • goby

3.3 逐个探测

对不同端口进行手动的指纹探测 + 漏洞探测

1 + 1 =
3 评论
    雨落新痕 Chrome 99 Windows 10
    2022年03月06日 回复

    求密码

    mo Chrome 81 Windows 10
    2021年12月11日 回复

    你好,请问下怎么能获取到密码?

    雨落新痕 Chrome 93 Windows 10
    2021年09月16日 回复

    大佬想看,可以给个密码嘛