# RAAspotter.py
import os
import re
import ssl
import subprocess
import shutil
import logging
import requests
import time
import datetime
import json
from urllib.parse import quote

from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions

import dateparser

from bs4 import BeautifulSoup
from pyvirtualdisplay import Display

from pdfminer.high_level import extract_text
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument

from stem import Signal
from stem.control import Controller

import hashlib
import smtplib
import email

from mastodon import Mastodon

logger = logging.getLogger(__name__)


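# Base class shared by the site-specific scrapers: it downloads RAA PDFs,
# OCRises them, extracts their text, searches for keywords and can report
# matches on the console, by email and on Mastodon.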
class RAAspotter:
    class RAA:
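        # A single RAA document (one PDF); the SHA-256 of its URL is used as
        # its identifier for all local files.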
        url = ""
        date = datetime.datetime(1970, 1, 1)
        date_str = ""
        name = ""
        sha256 = ""
        pdf_creation_date = None

        def __init__(self, url, date, name):
            if not url == "":
                self.url = url
            if not date == "":
                self.date = date
                self.date_str = date.strftime("%d/%m/%Y")
            if not name == "":
                self.name = name

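        # Compute (and cache) the SHA-256 of the RAA URL.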
        def get_sha256(self):
            if (self.sha256 == ""):
                self.sha256 = hashlib.sha256(self.url.encode('utf-8')).hexdigest()
            return self.sha256

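        # Read the CreationDate field from the PDF metadata, when present.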
        def get_pdf_creation_date(self, data_dir):
            raa_data_dir = f'{data_dir}/raa/'

            try:
                p_pdf = open(f'{raa_data_dir}{self.get_sha256()}.pdf', 'rb')
                pdf_parser = PDFParser(p_pdf)
                pdf_creation_date_raw = PDFDocument(pdf_parser).info[0]['CreationDate'].decode('utf-8').replace('D:', '').replace('\'', '')
                if pdf_creation_date_raw:
                    try:
                        self.pdf_creation_date = datetime.datetime.strptime(pdf_creation_date_raw, '%Y%m%d%H%M%S%z')
                    except ValueError as exc:
                        self.pdf_creation_date = datetime.datetime.strptime(pdf_creation_date_raw, '%Y%m%d%H%M%S')
            except Exception as exc:
                logger.warning(f'Impossible d\'extraire la date du PDF {self.get_sha256()}.pdf : {exc}')

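        # Extract the text of the OCRised PDF into a .txt file, then delete
        # both PDF versions.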
        def extract_content(self, data_dir):
            raa_data_dir = f'{data_dir}/raa/'

            text = ""
            try:
                text = extract_text(f'{raa_data_dir}{self.get_sha256()}.ocr.pdf')
            except Exception as exc:
                logger.warning(f'ATTENTION: Impossible d\'extraire le texte du fichier {self.get_sha256()}.pdf : {exc}')

            # Write the PDF text to a text file for later analysis
            f = open(f'{raa_data_dir}{self.get_sha256()}.txt', 'w')
            f.write(text)
            f.close()

            # Delete the original PDF and the OCRised version
            os.remove(f'{raa_data_dir}{self.get_sha256()}.pdf')
            os.remove(f'{raa_data_dir}{self.get_sha256()}.ocr.pdf')

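        # Save the RAA properties (name, date, URL, creation date) as JSON.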
        def write_properties(self, data_dir):
            raa_data_dir = f'{data_dir}/raa/'

            pdf_creation_date_json = None
            if self.pdf_creation_date:
                pdf_creation_date_json = self.pdf_creation_date.strftime("%d/%m/%Y %H:%M:%S")

            properties = {
                'name': self.name,
                'date': self.date_str,
                'url': quote(self.url, safe='/:'),
                'first_saw_on': datetime.datetime.today().strftime("%d/%m/%Y %H:%M:%S"),
                'pdf_creation_date': pdf_creation_date_json
            }
            f = open(f'{raa_data_dir}{self.get_sha256()}.json', 'w')
            f.write(json.dumps(properties))
            f.close()

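        # Full local processing of a downloaded RAA (the not_before and
        # keywords parameters are currently unused here).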
        def parse(self, data_dir, not_before, keywords):
            self.get_pdf_creation_date(data_dir)
            self.write_properties(data_dir)
            self.extract_content(data_dir)

    def __init__(self, data_dir, user_agent=''):
        logger.debug('Initialisation de RAAspotter')

        # Create the download directory
        os.makedirs(data_dir, exist_ok=True)

        # pdfminer.six is a bit too verbose in debug mode, so raise its log level
        logging.getLogger("pdfminer").setLevel(logging.WARNING)

        self.session = requests.Session()
        self.data_dir = data_dir
        self.found = False
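        # short_code is expected to be defined by the subclass; here it is
        # only used to name the output file.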
        self.output_file_path = os.path.dirname(os.path.abspath(__file__)) + f'/output_{self.short_code}.log'
        self.sleep_time = 0
        self.tor_enabled = False
        self.tor_max_requests = 0
        self.tor_requests = 0
        self.not_before = datetime.datetime(2024, 1, 1)
        self.smtp_configured = False
        self.mastodon = None
        self.mastodon_prefix = ''
        self.mastodon_suffix = ''

        self.update_user_agent(user_agent)

        f = open(self.output_file_path, 'w')
        f.write('')
        f.close()

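    # Configure the Mastodon client; ignored unless both an access token and
    # an instance are given.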
    def configure_mastodon(self, access_token, instance, mastodon_prefix, mastodon_suffix):
        if access_token and access_token != "" and instance and instance != "":
            self.mastodon = Mastodon(
                access_token=access_token,
                api_base_url=instance
            )
            self.mastodon_prefix = mastodon_prefix
            self.mastodon_suffix = mastodon_suffix

    def mastodon_toot(self, content):
        if self.mastodon:
            toot = content
            if not self.mastodon_prefix == '':
                toot = f"{self.mastodon_prefix}\n\n{toot}"
            if not self.mastodon_suffix == '':
                toot = f"{toot}\n\n{self.mastodon_suffix}"
            self.mastodon.toot(toot)

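    # Route all requests through the local Tor SOCKS proxy (127.0.0.1:9050);
    # after max_requests requests a new identity is requested (0 disables the
    # rotation).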
    def enable_tor(self, max_requests=0):
        proxies = {
            "http": f"socks5h://127.0.0.1:9050",
            "https": f"socks5h://127.0.0.1:9050",
        }
        self.tor_enabled = True
        self.tor_max_requests = max_requests
        self.tor_requests = 0
        self.session.proxies.update(proxies)
        self.tor_get_new_id()

    def disable_tor(self):
        proxies = {}
        self.tor_enabled = False
        self.tor_max_requests = 0
        self.tor_requests = 0
        self.session.proxies.update(proxies)

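    # Ask the local Tor control port (9051) for a new identity (NEWNYM),
    # closing the current connections first.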
    def tor_get_new_id(self):
        logger.info('Changement d\'identité Tor')
        try:
            self.session.close()
            controller = Controller.from_port(port=9051)
            controller.authenticate()
            controller.signal(Signal.NEWNYM)
            time.sleep(5)
            self.tor_requests = 0
        except Exception as exc:
            logger.debug(f'Impossible de changer d\'identité Tor: {exc}')

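    # Collect the links matching the CSS selector `element` on a page. With
    # recursive_until_pdf, pages that do not contain a PDF are themselves
    # crawled for sub-pages.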
    def get_sub_pages(self, page_content, element, host, recursive_until_pdf):
        soup = BeautifulSoup(page_content, 'html.parser')
        sub_pages = []
        for a in soup.select(element):
            if a.get('href'):
                url = f"{host}{a['href']}"
                if recursive_until_pdf:
                    sub_page_content = self.get_page(url, 'get').content
                    if not self.has_pdf(sub_page_content):
                        logger.info(
                            f'{url} ne contient pas de PDF, on récupère ses sous-pages'
                        )
                        for sub_sub_page in self.get_sub_pages(
                            sub_page_content,
                            element,
                            host,
                            recursive_until_pdf
                        ):
                            sub_pages.append(sub_sub_page)
                    else:
                        sub_page = {
                            'url': url,
                            'name': a.get_text().strip()
                        }
                        sub_pages.append(sub_page)
                else:
                    sub_page = {
                        'url': url,
                        'name': a.get_text().strip()
                    }
                    sub_pages.append(sub_page)
        return sub_pages

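    # Collect the sub-pages (and optional details) of a paginated listing,
    # following the pager link recursively.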
    def get_sub_pages_with_pager(self, page, sub_page_element, pager_element, details_element, host):
        pages = []
        page_content = self.get_page(page, 'get').content

        # Initialise the parser
        soup = BeautifulSoup(page_content, 'html.parser')

        # Look up the sub-pages
        sub_pages = soup.select(sub_page_element)
        sub_pages_details = None
        if details_element is not None:
            sub_pages_details = soup.select(details_element)
        i = 0
        for sub_page in sub_pages:
            if sub_page.get('href'):
                page = {
                    'url': f"{host}{sub_page['href']}",
                    'name': sub_page.get_text().strip(),
                    'details': ''
                }
                if details_element is not None:
                    page['details'] = sub_pages_details[i].get_text().strip()
                pages.append(page)
                i = i + 1

        # Look for a pager and, if one is found, follow it
        pager = soup.select(pager_element)
        if pager and pager[0] and pager[0].get('href'):
            for sub_page in self.get_sub_pages_with_pager(
                f"{host}{pager[0]['href']}",
                sub_page_element,
                pager_element,
                details_element,
                host
            ):
                pages.append(sub_page)

        return pages

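    # Collect the RAA elements of every page in pages_list, following the
    # pager links recursively (get_raa_elements() is expected from the
    # subclass).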
    def get_raa_with_pager(self, pages_list, pager_element, host):
        elements = []
        # Parse every page passed as a parameter
        for page in pages_list:
            page_content = self.get_page(page, 'get').content

            # For each page, collect the PDFs
            for raa in self.get_raa_elements(page_content):
                elements.append(raa)

            # Also check whether there is a pager
            sub_pages = []
            for sub_page in self.get_sub_pages(
                page_content,
                pager_element,
                host,
                True
            ):
                sub_pages.append(sub_page['url'])
            for sub_raa in self.get_raa_with_pager(
                sub_pages,
                pager_element,
                host
            ):
                elements.append(sub_raa)
        return elements

    def set_sleep_time(self, sleep_time):
        self.sleep_time = sleep_time

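    # True if the page contains at least one link to a .pdf file.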
    def has_pdf(self, page_content):
        soup = BeautifulSoup(page_content, 'html.parser')
        for a in soup.find_all('a', href=True):
            if a['href'].endswith('.pdf'):
                return True
        return False

    # Start a headless browser, load the URL, optionally wait for an element
    # to be present, then return the page source
    def get_session(self, url, wait_element, remaining_retries=0):
        webdriver_options = webdriver.ChromeOptions()
        webdriver_options.add_argument("--no-sandbox")
        webdriver_options.add_argument("--disable-extensions")
        webdriver_options.add_argument("--disable-gpu")
        webdriver_options.add_argument("--disable-dev-shm-usage")
        webdriver_options.add_argument("--use_subprocess")
        webdriver_options.add_argument("--disable-blink-features=AutomationControlled")

        if not self.user_agent == "":
            webdriver_options.add_argument(f"--user-agent={self.user_agent}")

        webdriver_options.add_argument("--headless")
        webdriver_options.add_argument("--window-size=1024,768")
        display = Display(visible=False, size=(1024, 768))
        display.start()

        browser = webdriver.Chrome(options=webdriver_options)

        # Load the URL
        browser.get(url)

        if wait_element is not None:
            # Wait for the browser to get past the anti-bot checks and for
            # the content to be displayed
            try:
                WebDriverWait(browser, 60).until(
                    expected_conditions.presence_of_element_located(
                        (
                            By.ID,
                            wait_element
                        )
                    )
                )
            except TimeoutException as exc:
                logger.warning(f'TimeoutException: {exc}')
                if remaining_retries > 0:
                    return self.get_session(url, wait_element, (remaining_retries - 1))
                else:
                    raise TimeoutException(exc)

        page_content = browser.page_source

        # Grab the browser cookies so they can be reused later
        for cookie in browser.get_cookies():
            self.session.cookies.set(cookie['name'], cookie['value'])

        # Stop the browser
        browser.quit()
        display.stop()

        return page_content

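    # Print to stdout and append the same text, stripped of ANSI colour
    # codes, to the output log file.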
    def print_output(self, data):
        print(data)
        data = data.replace('\033[92m', '')
        data = data.replace('\033[0m', '')
        data = data.replace('\033[1m', '')
        f = open(self.output_file_path, 'a')
        f.write(data + "\n")
        f.close()

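    # Fetch a page with the requests session, retrying after a pause on
    # HTTP 429 and connection errors, and rotating the Tor identity when the
    # per-identity request limit is reached.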
    def get_page(self, url, method, data={}):
        try:
            logger.debug(f'Chargement de la page {url}')
            if self.sleep_time > 0:
                time.sleep(self.sleep_time)

            page = None
            if method == 'get':
                page = self.session.get(url)
            if method == 'post':
                page = self.session.post(url, data=data)

            if page.status_code == 429:
                logger.info('Erreur 429 Too Many Requests reçue, temporisation...')
                self.tor_get_new_id()
                time.sleep(55)
                return self.get_page(url, method, data)

            if self.tor_enabled:
                self.tor_requests += 1
                if self.tor_max_requests > 0 and \
                   self.tor_requests > self.tor_max_requests:
                    self.tor_get_new_id()

            return page
        except requests.exceptions.ConnectionError as exc:
            logger.info('Erreur de connexion, temporisation...')
            self.tor_get_new_id()
            time.sleep(55)
            return self.get_page(url, method, data)

    def update_user_agent(self, user_agent):
        self.user_agent = user_agent
        self.session.headers.update({'User-Agent': self.user_agent})

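    # Download the RAA PDF to data_dir/raa/<sha256>.pdf, retrying if the
    # connection is interrupted.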
    def download_file(self, raa):
        try:
            os.makedirs(
                os.path.dirname(f'{self.data_dir}/raa/{raa.get_sha256()}.pdf'),
                exist_ok=True
            )
            file = self.get_page(raa.url, 'get')
            f = open(f'{self.data_dir}/raa/{raa.get_sha256()}.pdf', 'wb')
            f.write(file.content)
            f.close()
        except (requests.exceptions.ConnectionError,
                requests.exceptions.ChunkedEncodingError):
            logger.warning(f'ATTENTION: la connexion a été interrompue pendant le téléchargement de {raa.url}, nouvelle tentative...')
            self.download_file(raa)
        except Exception as exc:
            logger.warning(f'ATTENTION: Impossible de télécharger le fichier {raa.url}: {exc}')

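    # OCRise the downloaded PDF with ocrmypdf. If ocrmypdf reports that the
    # file is not a valid PDF (return code 2), it is downloaded again once;
    # return codes 4, 6 and 10 are ignored; any other failure falls back to
    # copying the original PDF.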
    def ocr(self, raa, retry_on_failure=True):
        cmd = [
            'ocrmypdf',
            '-l', 'eng+fra',
            '--output-type', 'pdf',
            '--redo-ocr',
            '--skip-big', '500',
            '--invalidate-digital-signatures',
            '--optimize', '0',
            f'{self.data_dir}/raa/{raa.get_sha256()}.pdf',
            f'{self.data_dir}/raa/{raa.get_sha256()}.ocr.pdf'
        ]
        logger.debug(f'Lancement de ocrmypdf: {cmd}')
        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as exc:
            if exc.returncode == 2 and retry_on_failure:
                logger.warning('ATTENTION : Le fichier n\'est pas un PDF correct, nouvelle tentative de le télécharger')
                if self.tor_enabled:
                    self.tor_get_new_id()
                self.download_file(raa)
                self.ocr(raa, False)
            elif exc.returncode not in (4, 6, 10):
                logger.warning(f'ATTENTION : Impossible d\'OCRiser le document ({exc.returncode}) : {exc.output}')
                shutil.copy(f'{self.data_dir}/raa/{raa.get_sha256()}.pdf', f'{self.data_dir}/raa/{raa.get_sha256()}.ocr.pdf')

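    # Search the extracted text for each keyword (treated as a
    # case-insensitive regular expression) and report any match on the
    # console, in the output file and on Mastodon.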
    def search_keywords(self, raa, keywords):
        text = open(f'{self.data_dir}/raa/{raa.get_sha256()}.txt').read()

        found = False
        found_keywords = []
        for keyword in keywords:
            if re.search(keyword, text, re.IGNORECASE | re.MULTILINE):
                if not found:
                    url = quote(raa.url, safe='/:')
                    self.print_output(f'\033[92m{raa.name}\033[0m ({raa.date_str})')
                    self.print_output(f'URL : {url}')
                    found = True
                    self.found = True
                self.print_output(f'    Le terme \033[1m{keyword}\033[0m a été trouvé.')
                found_keywords.append(keyword)

        if found:
            self.print_output('')
            url = quote(raa.url, safe='/:')
            found_keywords_str = ', '.join(
                [str(x) for x in found_keywords]
            )
            self.mastodon_toot(
                f'{raa.name} ({raa.date_str})\n\nLes termes suivants ont '
                f'été trouvés : {found_keywords_str}.\n\nURL : {url}'
            )

    def parse_raa(self, elements, keywords):
        for raa in elements:
            # If the file has not already been parsed and is more recent than
            # the cut-off date, download and parse it
            if raa.date >= self.not_before and not os.path.isfile(f'{self.data_dir}/raa/{raa.get_sha256()}.txt'):
                url = quote(raa.url, safe='/:')
                logger.info(f'Nouveau fichier : {raa.name} ({raa.date_str}). URL : {url}')
                self.download_file(raa)
                self.ocr(raa, True)
                raa.parse(self.data_dir, self.not_before, keywords)
                self.search_keywords(raa, keywords)

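    # Entry point that each subclass must override.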
    def get_raa(self, page_content):
        logger.error('Cette fonction doit être surchargée')

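    # Store the SMTP settings; email is only sent when all the mandatory
    # parameters are provided.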
    def configure_mailer(self, smtp_host, smtp_username, smtp_password,
                         smtp_port, smtp_starttls, smtp_ssl, email_from,
                         email_to, email_object):
        self.smtp_host = smtp_host
        self.smtp_username = smtp_username
        self.smtp_password = smtp_password
        if smtp_port <= 0:
            self.smtp_port = 587
        else:
            self.smtp_port = int(smtp_port)
        self.smtp_starttls = smtp_starttls
        self.smtp_ssl = smtp_ssl
        self.email_from = email_from
        self.email_to = email_to
        self.email_object = email_object

        if smtp_host and smtp_username and smtp_password and email_from and email_to and email_object:
            self.smtp_configured = True

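    # Email the content of the output file to each recipient, if SMTP is
    # configured and at least one keyword was found.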
    def mailer(self):
        if self.smtp_configured and self.found:
            try:
                message = email.message.EmailMessage()
                message.set_content(open(self.output_file_path).read())

                message['Subject'] = self.email_object
                message['From'] = self.email_from
                message['Message-ID'] = email.utils.make_msgid(domain=self.email_from.split('@')[-1])
                message['Date'] = email.utils.formatdate()

                context = ssl.create_default_context()

                if self.smtp_ssl is True:
                    for address in self.email_to.split(','):
                        del message['To']
                        message['To'] = address
                        smtp = smtplib.SMTP_SSL(self.smtp_host, self.smtp_port, context=context)
                        if self.smtp_username:
                            smtp.login(self.smtp_username, self.smtp_password)
                        smtp.send_message(message)
                        smtp.quit()
                elif self.smtp_starttls is True:
                    for address in self.email_to.split(','):
                        del message['To']
                        message['To'] = address
                        smtp = smtplib.SMTP(self.smtp_host)
                        smtp.starttls(context=context)
                        if self.smtp_username:
                            smtp.login(self.smtp_username, self.smtp_password)
                        smtp.send_message(message)
                        smtp.quit()
                else:
                    for address in self.email_to.split(','):
                        del message['To']
                        message['To'] = address
                        smtp = smtplib.SMTP(self.smtp_host)
                        if self.smtp_username:
                            smtp.login(self.smtp_username, self.smtp_password)
                        smtp.send_message(message)
                        smtp.quit()
            except Exception as exc:
                logger.warning(f'Impossible d\'envoyer le courrier électronique : {exc}')

    # Try to guess the date of a RAA from its name.
    # Useful to limit the number of requests when collecting the RAAs to scan.
    @staticmethod
    def guess_date(string, regex):
        try:
            search = re.search(regex, string, re.IGNORECASE)
            guessed_date = dateparser.parse(search.group(1))
            if guessed_date is None:
                raise Exception('La date est un objet None')
            else:
                return guessed_date
        except Exception as exc:
            logger.warning(f'Impossible de deviner la date du terme {string} : {exc}')
            return datetime.datetime(9999, 1, 1)
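
# Illustrative sketch (hypothetical selectors and URL): a subclass typically
# builds RAAspotter.RAA objects from the links of a page and hands them to
# parse_raa(), for instance:
#
#     soup = BeautifulSoup(page_content, 'html.parser')
#     elements = []
#     for a in soup.find_all('a', href=True):
#         if a['href'].endswith('.pdf'):
#             name = a.get_text().strip()
#             date = RAAspotter.guess_date(name, '([0-9]{2}-[0-9]{2}-[0-9]{4})')
#             elements.append(RAAspotter.RAA(a['href'], date, name))
#     self.parse_raa(elements, keywords)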