v3n0m-Scanner / V3n0M-Scanner

Popular Pentesting scanner in Python3.6 for SQLi/XSS/LFI/RFI and other Vulns
GNU General Public License v3.0

Reworked and polished target.py #237

Open mifkilla opened 1 year ago

mifkilla commented 1 year ago

#!/usr/bin/python3

# !!! collections.MutableMapping has been removed in Python 3.10 - fix: "pip install --upgrade requests"

import re
import sys
import os
import socket
import urllib.request
import time
import threading
import random
import subprocess
import asyncio
import requests
from datetime import datetime
from pathlib import Path
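
# Aside, illustration only (not part of this patch): the MutableMapping
# breakage noted above comes from old requests/urllib3 releases importing
# an alias that Python 3.10 removed. Upgrading requests is the real fix;
# the rename itself looks like this:
try:
    from collections.abc import MutableMapping  # Python 3.3+ location
except ImportError:
    from collections import MutableMapping  # legacy alias, removed in 3.10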

sites = input("Enter List Location: ")
with open(sites, "r") as location:
    sitearray = [line.strip() for line in location]  # strip newlines so "site:" queries stay clean

class Injthread(threading.Thread):
    def __init__(self, hosts):
        self.hosts = hosts
        self.fcount = 0
        self.check = True
        threading.Thread.__init__(self)

    def run(self):
        for url in self.hosts:
            try:
                if self.check:
                    classicinj(url)
                else:
                    break
            except KeyboardInterrupt:
                pass
            self.fcount += 1

    def stop(self):
        self.check = False

class xssthread(threading.Thread):
    def __init__(self, hosts):
        self.hosts = hosts
        self.fcount = 0
        self.check = True
        threading.Thread.__init__(self)

    def run(self):
        for url in self.hosts:
            try:
                if self.check:
                    classicxss(url)
                else:
                    break
            except KeyboardInterrupt:
                pass
            self.fcount += 1

    def stop(self):
        self.check = False

def classicxss(url):
    for xss in xsses:
        if url not in vuln:
            try:
                response = urllib.request.urlopen(url + xss.replace("\n", "")).read()
                source = response.decode("utf-8", "ignore")  # decode bytes so re.findall gets text
                if re.findall("<OY1Py", source) or re.findall("<LOY2PyTRurb1c", source):
                    print(R + "\r\x1b[K[XSS]: ", O + url + xss, R + " ---> XSS Found")
                    xss_log_file.write("\n" + url + xss)
                    vuln.append(url)
            except Exception:
                if len(xss + url) < 147:
                    sys.stdout.write(B + "\r\x1b[K [*] Testing %s%s" % (url, xss))
                    sys.stdout.flush()

def xsstest():
    print(B + "\n[+] Preparing for XSS scanning.. \n")
    print(B + "\n[+] I'm working, please just hang out for a minute.. \n")
    vb = len(usearch) / int(numthreads)
    i = int(vb)
    m = len(usearch) % int(numthreads)
    z = 0
    if len(threads) <= int(numthreads):
        for x in range(0, int(numthreads)):
            sliced = usearch[x * i : (x + 1) * i]
            if z < m:
                sliced.append(usearch[int(numthreads) * i + z])
                z += 1
            thread = xssthread(sliced)
            thread.start()
            threads.append(thread)
    for thread in threads:
        thread.join()
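
# Illustration only: a hypothetical helper (never called by the scanner)
# showing how the slice math in xsstest() spreads URLs across threads;
# an even share each, with the remainder handed out one URL at a time.
def _demo_slicing(total=10, numthreads=3):
    urls = list(range(total))
    i, m, z = total // numthreads, total % numthreads, 0
    for x in range(numthreads):
        sliced = urls[x * i : (x + 1) * i]
        if z < m:  # early threads pick up one leftover item each
            sliced.append(urls[numthreads * i + z])
            z += 1
        print(x, sliced)  # -> 0 [0, 1, 2, 9] / 1 [3, 4, 5] / 2 [6, 7, 8]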

def classicinj(url):
    global sql_list_counter
    aug_url = url + "'"

# noinspection PyBroadException
def ignoringGet(url):
    # swallow every network/HTTP error and hand back "" so a bad page
    # cannot crash a worker
    try:
        response = requests.get(url, timeout=2)
        response.raise_for_status()
    except Exception:
        return ""
    return response.text
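
# For reference: search() below fans this blocking helper out through
# loop.run_in_executor. A minimal standalone sketch of the same pattern
# (illustrative only; the URL list would come from the caller):
async def _demo_fetch_many(urls):
    loop = asyncio.get_event_loop()
    # requests is blocking, so each call runs on the default thread pool
    futures = [loop.run_in_executor(None, ignoringGet, u) for u in urls]
    return [await f for f in futures]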

# noinspection PyBroadException
async def search(pages_pulled_as_one):
    urls = []
    urls_len_last = 0
    timestart = datetime.now()
    for site in sitearray:
        progress = 0
        for dork in loaded_Dorks:
            progress += 1
            page = 0
            while page < int(pages_pulled_as_one):
                query = dork + " site:" + site
                futures = []
                loop = asyncio.get_event_loop()
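                # fan out 25 Bing result pages per batch; each request pulls
                # 50 hits, offset through the &first= parameter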
                for i in range(25):
                    results_web = (
                        "http://www.bing.com/search?q="
                        + query
                        + "&go=Submit&first="
                        + str((page + i) * 50 + 1)
                        + "&count=50"
                    )
                    futures.append(loop.run_in_executor(None, ignoringGet, results_web))
                page += 25
                stringreg = re.compile('(?<=href=")(.*?)(?=")')
                names = []
                for future in futures:
                    result = await future
                    names.extend(stringreg.findall(result))
                domains = set()
                for name in names:
                    basename = re.search(r"(?<=(://)).*?(?=/)", name)  # host up to the first "/"
                    if basename is None:
                        basename = re.search(r"(?<=://).*", name)
                    if basename is not None:
                        basename = basename.group(0)
                    if basename not in domains and basename is not None:
                        domains.add(basename)
                        urls.append(name)
                totalprogress = len(loaded_Dorks)
                percent = int((1.0 * progress / int(totalprogress)) * 100)
                urls_len = len(urls)
                os.system("clear")
                start_time = datetime.now()
                timeduration = start_time - timestart
                ticktock = timeduration.seconds
                hours, remainder = divmod(ticktock, 3600)
                minutes, seconds = divmod(remainder, 60)
                sys.stdout.flush()
                sys.stdout.write(
                    W
                    + "\r\x1b[K "
                    + R
                    + "| Domain: <%s> Has been targeted \n "
                    "| Collected urls: %s Since start of scan \n"
                    " | D0rks: %s/%s Progressed so far \n"
                    " | Percent Done: %s \n"
                    " | Current page no.: <%s> in Cycles of 25 Pages of results pulled in Asyncio\n"
                    " | Dork In Progress: %s\n"
                    " | Elapsed Time: %s\n"
                    % (
                        R + site,
                        repr(urls_len),
                        progress,
                        totalprogress,
                        repr(percent),
                        repr(page),
                        dork,
                        "%s:%s:%s" % (hours, minutes, seconds),
                    )
                )
                sys.stdout.flush()
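                # no new URLs in this batch means the dork is exhausted:
                # jump the page counter to the limit to move on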
                if urls_len == urls_len_last:
                    page = int(pages_pulled_as_one)
                urls_len_last = urls_len
    tmplist = []
    print(
        "\n\n[+] URLS (unsorted), still including duplicates and junk results: ",
        len(urls),
    )
    for url in urls:
        unsorted.append(url)
        try:
            host = url.split("/", 3)
            domain = host[2]
            if (
                domain not in tmplist
                and "=" in url
                and any(x in url for x in search_list)
            ):
                finallist.append(url)
                tmplist.append(domain)
        except KeyboardInterrupt:
            os.system("clear")
            print(G + "Program Paused" + R)
            print("[1] Unpause")
            print("[2] Skip rest of scan and Continue with current results")
            print("[3] Return to main menu")
            chce1 = input(":")
            if chce1 == "1":
                continue
            if chce1 == "2":
                vulnscan()
            if chce1 == "3":
                fmenu()
            continue
    print("[+] URLS (sorted) with rubbish removed: ", len(finallist))
    return finallist
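
# For reference: the dedup pass above keys each URL on the host field
# from url.split("/", 3). With a hypothetical result URL:
#     "http://example.com/index.php?id=1".split("/", 3)
#     -> ['http:', '', 'example.com', 'index.php?id=1']
# so host[2] is the bare domain, and only one URL per domain survives.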

# noinspection PyBroadException
def fmenu():
    global customSelected
    global vuln
    global customlist
    vuln = []
    if endsub != 1:
        vulnscan()
        print(W + "")
    fscan()
    search_list = [line.strip() for line in open(sites, "r", encoding="utf-8")]
    d0rk = [line.strip() for line in open("lists/d0rks", "r", encoding="utf-8")]
    header = [line.strip() for line in open("lists/header", "r", encoding="utf-8")]
    xsses = [line.strip() for line in open("lists/xsses", "r", encoding="utf-8")]
    lfis = [
        line.strip() for line in open("lists/pathtotest_huge.txt", "r", encoding="utf-8")
    ]
    tables = [line.strip() for line in open("lists/tables", "r", encoding="utf-8")]
    columns = [line.strip() for line in open("lists/columns", "r", encoding="utf-8")]
    search_ignore = [
        "gov",
        "fbi",
        "javascript",
        "stackoverflow",
        "microsoft",
        "24img.com",
        "v3n0m",
        "venom",
        "evilzone",
        "iranhackers",
        "pastebin",
        "charity",
        "school",
        "learning",
        "foundation",
        "hostpital",
        "medical",
        "doctors",
        "emergency",
        "nsa",
        "cia",
        "mossad",
        "yahoo",
        "dorks",
        "d0rks",
        "bank",
        "school",
        "hack",
        "msdn",
        "google",
        "youtube",
        "phpbuddy",
        "iranhack",
        "phpbuilder",
        "codingforums",
        "phpfreaks",
        "facebook",
        "twitter",
        "hackforums",
        "askjeeves",
        "wordpress",
        "github",
        "pentest",
    ]

    random.shuffle(header)
    random.shuffle(lfis)

    # Colours
    W = "\033[0m"
    R = "\033[31m"
    G = "\033[32m"
    O = "\033[33m"
    B = "\033[34m"

    def cache_Check():
        global cachestatus
        my_file1 = Path("v3n0m-lfi.txt")
        my_file2 = Path("v3n0m-rce.txt")
        my_file3 = Path("v3n0m-xss.txt")
        my_file5 = Path("v3n0m-sqli.txt")
        my_file4 = Path("IPLogList.txt")
        if (
            my_file1.is_file()
            or my_file2.is_file()
            or my_file3.is_file()
            or my_file4.is_file()
            or my_file5.is_file()
        ):
            cachestatus = "contains some things"
        else:
            cachestatus = "empty"

    def sql_list_counter():
        global sql_count
        try:
            with open("v3n0m-sqli.txt", encoding="utf-8") as f:
                lines = [x for x in f.readlines() if x != "\n"]
            sql_count = len(lines)
        except FileNotFoundError:
            sql_count = 0

    def lfi_list_counter():
        global lfi_count
        try:
            with open("v3n0m-lfi.txt", encoding="utf-8") as f:
                lines = [x for x in f.readlines() if x != "\n"]
            lfi_count = len(lines)
        except FileNotFoundError:
            lfi_count = 0

    list_count = 0
    lfi_count = 0
    subprocess.call("clear", shell=True)
    arg_end = "--"
    arg_eva = "+"
    colMax = 60  # Change this at your will
    endsub = 1
    gets = 0
    file = "/etc/passwd"
    ProxyEnabled = False
    menu = True
    current_version = "433 "
    while True:
        fmenu()
vittring commented 1 year ago

Thanks!