# NOTE: Scribd export chrome ("Download as txt, pdf..." / "You are on page 1 of 3")
# removed from this header so the file parses as Python.

import requests

from bs4 import BeautifulSoup


import argparse
import subprocess
import os
import re
from colorama import Fore, Style
from tqdm import tqdm
import time
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders

# Função para fazer a solicitação HTTP e extrair os resultados da pesquisa


def _collect_unique_urls(site, text):
    """Scrape Google result pages for de-duplicated site roots.

    Builds a Google dork (site: + inurl:wp-content + intext:) and collects
    each result URL truncated at 'wp-content', skipping Google's own links.
    """
    unique_urls = []
    # NOTE(review): range(0, 1) fetches only the first result page even though
    # start={page * 10} supports pagination — widen the range to scan more pages.
    for page in range(0, 1):
        search_url = (
            f'https://www.google.com/search?q=site%3A"{site}"'
            f'%20inurl%3Awp-content%2F%20intext%3A{text}&start={page * 10}'
        )
        response = requests.get(search_url)
        soup = BeautifulSoup(response.text, 'html.parser')
        for anchor in soup.find_all('a'):
            href = anchor.get('href', '')  # not every <a> carries an href
            if not href.startswith('/url?q='):
                continue
            # Strip Google's redirect prefix, keep only the site root
            # (everything before 'wp-content').
            href = href[len('/url?q='):].split('wp-content')[0]
            if 'google' not in href and href not in unique_urls:
                unique_urls.append(href)
        time.sleep(2)  # throttle to avoid Google rate limiting
    return unique_urls


def _harvest_emails(site_url, max_emails=3):
    """Fetch *site_url* with curl and return up to *max_emails* addresses.

    Filters out short/junk matches and Google-cache artifacts
    ('cached@...'). Output is captured directly instead of the original
    temp-file write/read/delete round-trip.
    """
    result = subprocess.run(
        ['curl', '-s', site_url],
        capture_output=True, encoding='utf-8', errors='ignore',
    )
    candidates = re.findall(
        r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b',
        result.stdout or '',
    )
    emails = []
    for email in candidates:
        # len > 14 is a crude junk filter inherited from the original code.
        if len(email) > 14 and not email.startswith('cached@') and email not in emails:
            emails.append(email)
            if len(emails) >= max_emails:
                break
    return emails


def search_google(site, text, token):
    """Dork Google for WordPress sites on *site*, harvest contact emails,
    WPScan each site, and mail the report when the scan finds vulnerabilities.

    Args:
        site: Domain for the Google ``site:`` operator.
        text: Text for the Google ``intext:`` operator.
        token: WPScan API token, forwarded to ``run_wpscan``.
    """
    unique_urls = _collect_unique_urls(site, text)

    print(Fore.GREEN + "\nScanning websites..." + Style.RESET_ALL)

    # site URL -> list of harvested emails (only sites with at least one hit)
    emails_by_url = {}
    for site_url in tqdm(unique_urls, desc="Progress", unit="website"):
        emails = _harvest_emails(site_url)
        if emails:
            emails_by_url[site_url] = emails

    if emails_by_url:
        print(Fore.BLUE + "\nWebsites with emails found:" + Style.RESET_ALL)
        for site_url, emails in emails_by_url.items():
            print(f"\nURL: {site_url}")
            print(Fore.RED + "Emails:")
            for email in emails:
                print(email)
            print(Style.RESET_ALL)

    with tqdm(total=len(emails_by_url), desc="WPScan Progress",
              unit="website") as pbar:
        for site_url, emails in emails_by_url.items():
            site_name = site_url.split('//')[1].split('/')[0]
            output_file = f'{site_name}_report.txt'
            print(Fore.GREEN + f"\nRunning WPScan on website {site_url}..."
                  + Style.RESET_ALL)
            run_wpscan(site_url, output_file, token)
            pbar.update(1)

            # Check the WPScan report for vulnerabilities and notify the
            # addresses harvested from that site.
            if os.path.exists(output_file):
                with open(output_file, 'r') as report_file:
                    report = report_file.read()
                if any(marker in report
                       for marker in ('vulnerability', 'vulnerabilities')):
                    for email in emails:
                        send_email(site_url, email, output_file)

def run_wpscan(site_url, output_file, token):
    """Run WPScan against *site_url*, writing the report to *output_file*.

    Args:
        site_url: Target WordPress site URL.
        output_file: Path WPScan writes its report to (``-o``).
        token: WPScan API token (``--api-token``).
    """
    # List form (shell=False) avoids shell-injection from scraped URLs.
    subprocess.run([
        'wpscan',
        '--url', site_url,
        '--api-token', token,
        '--no-banner',
        '--random-user-agent',
        '-o', output_file,
    ])

def send_email(site_url, recipient_email, output_file):
    """Email the WPScan report for *site_url* to *recipient_email*.

    The report file is attached as application/octet-stream and the message
    is sent over SMTP with STARTTLS.

    Args:
        site_url: Site the report concerns (referenced in the message body).
        recipient_email: Destination address.
        output_file: Path of the WPScan report to attach.
    """
    # SECURITY: credentials were hard-coded in the original source. Prefer
    # environment variables; the original values remain as fallback only for
    # backward compatibility — rotate them, they are exposed in this file.
    sender_email = os.environ.get('SENDER_EMAIL', 'me@lucasf.me')
    sender_password = os.environ.get('SENDER_PASSWORD', 'H2k@evo860')
    subject = 'Identificação de possíveis vulnerabilidades no seu site'

    msg = MIMEMultipart()
    msg['From'] = sender_email
    msg['To'] = recipient_email
    msg['Subject'] = subject

    body = (f"Olá, encontramos possíveis vulnerabilidades no site {site_url}. "
            "Por favor, encontre o relatório em anexo.")
    msg.attach(MIMEText(body, 'plain'))

    # Attach the report; 'with' guarantees the handle is closed (the original
    # opened the file and never closed it).
    part = MIMEBase('application', 'octet-stream')
    with open(output_file, 'rb') as attachment:
        part.set_payload(attachment.read())
    encoders.encode_base64(part)
    part.add_header('Content-Disposition', f"attachment; filename= {output_file}")
    msg.attach(part)

    # try/finally ensures the SMTP connection is closed even if login or
    # sending raises.
    server = smtplib.SMTP('email-ssl.com.br', 587)
    try:
        server.starttls()
        server.login(sender_email, sender_password)
        server.sendmail(sender_email, recipient_email, msg.as_string())
    finally:
        server.quit()

if __name__ == "__main__":
    # CLI entry point: collect the dork parameters and start the scan.
    parser = argparse.ArgumentParser(
        description='Scan websites for vulnerabilities.')
    parser.add_argument('-s', '--site', required=True, help='The site to scan')
    parser.add_argument('-t', '--text', required=True, help='The text to look for')
    parser.add_argument('-k', '--token', required=True, help='WPScan API token')
    args = parser.parse_args()

    search_google(args.site, args.text, args.token)

# (end of file — Scribd "You might also like" footer removed)