import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse

# Configuration: site to mirror and local output directory.
base_url = "https://www.elvenstats.com/"
download_dir = "elvenstats_clone"

# Create the download directory (no-op if it already exists).
os.makedirs(download_dir, exist_ok=True)

# Função para baixar arquivos
def download_file(url):
    try:
        response = requests.get(url, stream=True)
        if response.status_code == 200:
            file_name = os.path.join(download_dir, urlparse(url).path[1:])
            os.makedirs(os.path.dirname(file_name), exist_ok=True)
            with open(file_name, 'wb') as file:
                for chunk in response.iter_content(1024):
                    file.write(chunk)
            print(f"Baixado: {file_name}")
        else:
            print(f"Falha ao baixar: {url}")
    except Exception as e:
        print(f"Erro ao baixar {url}: {e}")

# Função para processar a página
def process_page(url):
    try:
        response = requests.get(url)
        if response.status_code == 200:
            soup = BeautifulSoup(response.text, 'html.parser')

            # Baixa todos os recursos (CSS, JS, imagens)
            for resource in soup.find_all(['link', 'script', 'img']):
                resource_url = resource.get('href') or resource.get('src')
                if resource_url:
                    full_url = urljoin(base_url, resource_url)
                    download_file(full_url)

            # Salva a página HTML
            file_name = os.path.join(download_dir, urlparse(url).path[1:] or "index.html")
            os.makedirs(os.path.dirname(file_name), exist_ok=True)
            with open(file_name, 'w', encoding='utf-8') as file:
                file.write(soup.prettify())
            print(f"Página salva: {file_name}")
        else:
            print(f"Falha ao acessar: {url}")
    except Exception as e:
        print(f"Erro ao processar {url}: {e}")

# Script entry point: mirror the site's landing page and its assets.
# Guarded so importing this module doesn't trigger a network crawl.
if __name__ == "__main__":
    process_page(base_url)
