欢迎您光临易酷网,本站每日发布最新资源,Ctrl+D收藏本站,方便下次访问...
热门推荐:王者荣耀 | 软件库 | 现金红包 | 活动资讯 | QQ技术 | QQ新闻

网站首页 技术学习 教程分享 正文

网站渗透常用Python小脚本same_host旁站查询

admin12345 2017-04-16 教程分享 7752 ℃ 0 评论

网站渗透常用Python小脚本same_host旁站查询

旁站查询来源:

http://dns.aizhan.com

http://s.tool.chinaz.com/same

http://i.links.cn/sameip/

http://www.ip2hosts.com/

效果图如下:

以百度网站和小残博客为例:

以百度网站为例:

网站渗透常用Python小脚本same host旁站查询 网站渗透常用Python小脚本same host旁站查询 ​ 教程分享  第1张

网站渗透常用Python小脚本same host旁站查询 网站渗透常用Python小脚本same host旁站查询 ​ 教程分享  第2张

PS:直接调用以上4个旁注接口查询同服服务器域名信息包含服务器类型 比如小残博客使用的是Tengine

#!/usr/bin/env python

#encoding: utf-8

import re

import sys

import json

import time

import requests

import urllib

import requests.packages.urllib3

from multiprocessing import Pool

from BeautifulSoup import BeautifulSoup

requests.packages.urllib3.disable_warnings()

 

headers = {'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20'}

 

def links_ip(host):
    """Query i.links.cn for other sites hosted on the same IP as *host*.

    Returns a list of URLs; the first entry is always "http://<host>" so
    the target itself is included even when the lookup fails or errors.
    """
    ip2hosts = ["http://" + host]
    try:
        source = requests.get('http://i.links.cn/sameip/' + host + '.html',
                              headers=headers, verify=False)
        soup = BeautifulSoup(source.text)
        divs = soup.findAll(style="word-break:break-all")
        if not divs:  # scrape returned no result rows
            print('Sorry! Not found!')
            return ip2hosts
        for div in divs:
            ip2hosts.append(div.a.string)
    except Exception as e:  # network/parse failure: keep whatever we collected
        print(str(e))
    return ip2hosts

 

def ip2host_get(host):
    """Query www.ip2hosts.com for domains sharing *host*'s IP.

    Returns a list of URLs; the first entry is always "http://<host>" so
    the target itself is included even when the lookup fails or errors.
    """
    ip2hosts = ["http://" + host]
    try:
        req = requests.get('http://www.ip2hosts.com/search.php?ip=' + str(host),
                           headers=headers, verify=False)
        # .text (not .content) so the 'result' probe and json.loads also
        # work on Python 3, where .content is bytes.
        src = req.text
        if src.find('result') != -1:
            data = json.loads(src)  # parse once instead of twice
            for item in data['result']:
                if len(item) > 0:  # skip empty entries
                    ip2hosts.append(item)
    except Exception as e:  # network/JSON failure: keep whatever we collected
        print(str(e))
    return ip2hosts

 

 

def filter(host):
    """Probe *host* and print "url server-type" if it is reachable.

    Hosts that fail to load, lack a Server header (KeyError), or have no
    <title> tag (IndexError) are silently skipped — this weeds out dead
    or non-HTML vhosts returned by the lookup services.

    NOTE: shadows the builtin filter(); name kept for caller compatibility.
    """
    try:
        response = requests.get(host, headers=headers, verify=False)
        server = response.headers['Server']  # KeyError -> skip this host
        # IndexError when there is no <title> -> skip this host
        re.findall(r'<title>(.*?)</title>', response.content)[0]
    except Exception:
        pass  # unreachable/malformed sites are deliberately ignored
    else:
        print("%s %s" % (host, server))

 

def aizhan(host):
    """Crawl dns.aizhan.com result pages for domains on *host*'s IP.

    Follows pagination until no "next page" link is found or the service
    answers 400. Returns a list of URLs; the first entry is always
    "http://<host>".
    """
    ip2hosts = ["http://" + host]
    regexp = r'''<a href="[^']+?([^']+?)/" rel="nofollow" target="_blank">\1</a>'''
    regexp_next = r'''<a href="http://dns.aizhan.com/[^/]+?/%d/">%d</a>'''
    url = 'http://dns.aizhan.com/%s/%d/'

    page = 1
    while True:
        if page > 2:
            time.sleep(1)  # throttle to avoid being blocked
        try:
            # bugfix: the request itself is now inside the try, and a
            # failure breaks out instead of falling through to use an
            # unbound 'html' (the original's except/pass caused a
            # NameError on the very next line).
            req = requests.get(url % (host, page), headers=headers, verify=False)
            if req.status_code == 400:
                break
            html = req.content.decode('utf-8')
        except Exception as e:
            print(str(e))
            break
        for site in re.findall(regexp, html):
            ip2hosts.append("http://" + site)
        if re.search(regexp_next % (page + 1, page + 1), html) is None:
            break  # no next-page link: pagination exhausted
        page += 1

    return ip2hosts

 

def chinaz(host):
    """Crawl s.tool.chinaz.com/same pages for domains on *host*'s IP.

    Follows pagination until no "next page" control is found. Returns a
    list of URLs; the first entry is always "http://<host>".
    """
    ip2hosts = ["http://" + host]
    regexp = r'''<a href='[^']+?([^']+?)' target=_blank>\1</a>'''
    regexp_next = r'''<a href="javascript:" val="%d" class="item[^"]*?">%d</a>'''
    url = 'http://s.tool.chinaz.com/same?s=%s&page=%d'

    page = 1
    while True:
        if page > 1:
            time.sleep(1)  # throttle to avoid being blocked
        try:
            # bugfix: the original had no error handling at all here — a
            # network or decode failure would abort the whole scan.
            req = requests.get(url % (host, page), headers=headers, verify=False)
            html = req.content.decode('utf-8')
        except Exception as e:
            print(str(e))
            break
        for site in re.findall(regexp, html):
            ip2hosts.append("http://" + site)
        if re.search(regexp_next % (page + 1, page + 1), html) is None:
            break  # (dead 'break' after 'return' in the original removed)
        page += 1
    return ip2hosts

 

def same_ip(host):
    """Aggregate all four same-IP lookup sources for *host*, dedupe the
    candidate URLs, and probe each one concurrently via filter()."""
    candidates = []
    for lookup in (ip2host_get, links_ip, aizhan, chinaz):
        candidates.extend(lookup(host))
    unique_urls = list(set(candidates))  # drop duplicates across sources
    pool = Pool()
    for url in unique_urls:
        pool.apply_async(filter, args=(url,))
    pool.close()
    pool.join()

 

 

if __name__ == "__main__":
    # Guard clause: bail out with usage text unless exactly one host was given.
    if len(sys.argv) != 2:
        print("usage: %s host" % sys.argv[0])
        sys.exit(-1)
    same_ip(sys.argv[1])

#!/usr/bin/env python

#encoding: utf-8

import re

import sys

import json

import time

import requests

import urllib

import requests.packages.urllib3

from multiprocessing import Pool

from BeautifulSoup import BeautifulSoup

requests.packages.urllib3.disable_warnings()

 

headers = {'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20'}

 

def links_ip(host):
    """Query i.links.cn for other sites hosted on the same IP as *host*.

    Returns a list of URLs; the first entry is always "http://<host>" so
    the target itself is included even when the lookup fails or errors.
    """
    ip2hosts = ["http://" + host]
    try:
        source = requests.get('http://i.links.cn/sameip/' + host + '.html',
                              headers=headers, verify=False)
        soup = BeautifulSoup(source.text)
        divs = soup.findAll(style="word-break:break-all")
        if not divs:  # scrape returned no result rows
            print('Sorry! Not found!')
            return ip2hosts
        for div in divs:
            ip2hosts.append(div.a.string)
    except Exception as e:  # network/parse failure: keep whatever we collected
        print(str(e))
    return ip2hosts

 

def ip2host_get(host):
    """Query www.ip2hosts.com for domains sharing *host*'s IP.

    Returns a list of URLs; the first entry is always "http://<host>" so
    the target itself is included even when the lookup fails or errors.
    """
    ip2hosts = ["http://" + host]
    try:
        req = requests.get('http://www.ip2hosts.com/search.php?ip=' + str(host),
                           headers=headers, verify=False)
        # .text (not .content) so the 'result' probe and json.loads also
        # work on Python 3, where .content is bytes.
        src = req.text
        if src.find('result') != -1:
            data = json.loads(src)  # parse once instead of twice
            for item in data['result']:
                if len(item) > 0:  # skip empty entries
                    ip2hosts.append(item)
    except Exception as e:  # network/JSON failure: keep whatever we collected
        print(str(e))
    return ip2hosts

 

 

def filter(host):
    """Probe *host* and print "url server-type" if it is reachable.

    Hosts that fail to load, lack a Server header (KeyError), or have no
    <title> tag (IndexError) are silently skipped — this weeds out dead
    or non-HTML vhosts returned by the lookup services.

    NOTE: shadows the builtin filter(); name kept for caller compatibility.
    """
    try:
        response = requests.get(host, headers=headers, verify=False)
        server = response.headers['Server']  # KeyError -> skip this host
        # IndexError when there is no <title> -> skip this host
        re.findall(r'<title>(.*?)</title>', response.content)[0]
    except Exception:
        pass  # unreachable/malformed sites are deliberately ignored
    else:
        print("%s %s" % (host, server))

 

def aizhan(host):
    """Crawl dns.aizhan.com result pages for domains on *host*'s IP.

    Follows pagination until no "next page" link is found or the service
    answers 400. Returns a list of URLs; the first entry is always
    "http://<host>".
    """
    ip2hosts = ["http://" + host]
    regexp = r'''<a href="[^']+?([^']+?)/" rel="nofollow" target="_blank">\1</a>'''
    regexp_next = r'''<a href="http://dns.aizhan.com/[^/]+?/%d/">%d</a>'''
    url = 'http://dns.aizhan.com/%s/%d/'

    page = 1
    while True:
        if page > 2:
            time.sleep(1)  # throttle to avoid being blocked
        try:
            # bugfix: the request itself is now inside the try, and a
            # failure breaks out instead of falling through to use an
            # unbound 'html' (the original's except/pass caused a
            # NameError on the very next line).
            req = requests.get(url % (host, page), headers=headers, verify=False)
            if req.status_code == 400:
                break
            html = req.content.decode('utf-8')
        except Exception as e:
            print(str(e))
            break
        for site in re.findall(regexp, html):
            ip2hosts.append("http://" + site)
        if re.search(regexp_next % (page + 1, page + 1), html) is None:
            break  # no next-page link: pagination exhausted
        page += 1

    return ip2hosts

 

def chinaz(host):
    """Crawl s.tool.chinaz.com/same pages for domains on *host*'s IP.

    Follows pagination until no "next page" control is found. Returns a
    list of URLs; the first entry is always "http://<host>".
    """
    ip2hosts = ["http://" + host]
    regexp = r'''<a href='[^']+?([^']+?)' target=_blank>\1</a>'''
    regexp_next = r'''<a href="javascript:" val="%d" class="item[^"]*?">%d</a>'''
    url = 'http://s.tool.chinaz.com/same?s=%s&page=%d'

    page = 1
    while True:
        if page > 1:
            time.sleep(1)  # throttle to avoid being blocked
        try:
            # bugfix: the original had no error handling at all here — a
            # network or decode failure would abort the whole scan.
            req = requests.get(url % (host, page), headers=headers, verify=False)
            html = req.content.decode('utf-8')
        except Exception as e:
            print(str(e))
            break
        for site in re.findall(regexp, html):
            ip2hosts.append("http://" + site)
        if re.search(regexp_next % (page + 1, page + 1), html) is None:
            break  # (dead 'break' after 'return' in the original removed)
        page += 1
    return ip2hosts

 

def same_ip(host):
    """Aggregate all four same-IP lookup sources for *host*, dedupe the
    candidate URLs, and probe each one concurrently via filter()."""
    candidates = []
    for lookup in (ip2host_get, links_ip, aizhan, chinaz):
        candidates.extend(lookup(host))
    unique_urls = list(set(candidates))  # drop duplicates across sources
    pool = Pool()
    for url in unique_urls:
        pool.apply_async(filter, args=(url,))
    pool.close()
    pool.join()

 

 

if __name__ == "__main__":
    # Guard clause: bail out with usage text unless exactly one host was given.
    if len(sys.argv) != 2:
        print("usage: %s host" % sys.argv[0])
        sys.exit(-1)
    same_ip(sys.argv[1])

大家可以发挥添加或者修改任意查询接口。

网站渗透常用Python小脚本same_host旁站查询



Tags:网站渗透常用Python小脚本same_host旁站查询 ​

大家都在看
腾讯视频母亲节特惠活动汤臣倍健ios13 beta6联合国礼品卡芒果会员京东汽车用品网络推广千牛帮非腾讯服务器问题追尾华盛顿剪映可以识别字幕吗每日抢Q币蓝钻腾讯视频VIP等奖励黑客攻陷《纽约邮报》App有些钱别人能赚你不能赚战争雷霆征兵登记大小招卡赛力斯优衣库女主角囚犯
热门文章
最新文章
热门标签
热评文章
快捷导航返回顶部
技术导航
易酷网交流群
返回顶部