import datetime

from bs4 import BeautifulSoup
from urllib.parse import unquote
import re
from Attrap import Attrap


class Attrap_pref12(Attrap):

    # Config
    __HOST = 'https://www.aveyron.gouv.fr'
    __RAA_PAGE = f'{__HOST}/Publications/Recueil-des-actes-administratifs'
    __USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/115.0'
    full_name = "Préfecture de l'Aveyron"
    short_code = 'pref12'

    def __init__(self, data_dir):
        super().__init__(data_dir, self.__USER_AGENT)
        self.enable_tor(10)
        self.not_before = datetime.datetime(2020, 1, 1)  # before 2020, RAAs live on the "Archives" page

    # Overrides the parent method to support CSS classes containing whitespace
    def get_sub_pages_with_pager(self, page, sub_page_element, pager_element, details_element, host):
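        """Overrides Attrap.get_sub_pages_with_pager(): here, pager_element is
        the raw class attribute value of the "next" link (which may contain
        whitespace) rather than a CSS selector.
        """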
        pages = []
        page_content = self.get_page(page, 'get').content

        # Initialize the parser
        soup = BeautifulSoup(page_content, 'html.parser')

        # Look for the sub-pages
        sub_pages = soup.select(sub_page_element)
        sub_pages_details = None
        if details_element is not None:
            sub_pages_details = soup.select(details_element)
        i = 0
        for sub_page in sub_pages:
            if sub_page.get('href'):
                page = {
                    'url': f"{host}{sub_page['href']}",
                    'name': sub_page.get_text().strip(),
                    'details': ''
                }
                if details_element is not None:
                    page['details'] = sub_pages_details[i].get_text().strip()
                pages.append(page)
                i += 1

        # Look for a pager and, if one is found, follow it.
        # Unlike the parent implementation, the pager_element parameter must
        # contain only the class attribute value (not a full CSS selector),
        # so that class names containing whitespace can be matched.
        pager = soup.find_all('a', class_=pager_element)
        if pager and pager[0].get('href'):
            for sub_page in self.get_sub_pages_with_pager(
                f"{host}{pager[0]['href']}",
                sub_page_element,
                pager_element,
                details_element,
                host
            ):
                pages.append(sub_page)

        return pages
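
    # A minimal illustration (hypothetical markup, not taken from the
    # prefecture site) of the BeautifulSoup behaviour the override above
    # relies on: a space-separated string passed to class_ is matched against
    # the tag's exact class attribute value, something a plain CSS class
    # selector cannot contain.
    #
    #   soup = BeautifulSoup('<a class="next page" href="/p2">2</a>', 'html.parser')
    #   soup.find_all('a', class_='next page')  # -> [<a class="next page" href="/p2">2</a>]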

    def get_raa(self, keywords):
        elements = []
        page_content = self.get_page(self.__RAA_PAGE, 'get').content
        soup = BeautifulSoup(page_content, 'html.parser')
        # Select the grey cards
        for a in soup.select('div.fr-card--grey div.fr-card__body div.fr-card__content h2.fr-card__title a'):
            # Regular pages
            if Attrap.guess_date(a.get_text().strip(), '([0-9]{4}).*').year >= self.not_before.year and 'Archives' not in a.get_text():
                for sub_page in self.get_sub_pages_with_pager(
                    f"{self.__HOST}{a['href']}",
                    'div.fr-card__body div.fr-card__content h2.fr-card__title a',
                    'fr-pagination__link fr-pagination__link--next fr-pagination__link--lg-label',
                    None,
                    self.__HOST
                ):
                    sub_page_content = self.get_page(sub_page['url'], 'get').content
                    for element in self.get_raa_elements(sub_page_content):
                        elements.append(element)
            # Archives (before 2021)
            elif self.not_before.year < 2021 and 'Archives' in a.get_text():
                page_content = self.get_page(f"{self.__HOST}{a['href']}", 'get').content
                for sub_page in self.get_sub_pages(
                    page_content,
                    'div.fr-card__body div.fr-card__content h2.fr-card__title a',
                    self.__HOST,
                    True
                ):
                    sub_page_content = self.get_page(sub_page['url'], 'get').content
                    sub_page_soup = BeautifulSoup(sub_page_content, 'html.parser')
                    for link in sub_page_soup.select('div.fr-card__body div.fr-card__content h2.fr-card__title a'):
                        sub_sub_page_content = self.get_page(f"{self.__HOST}{link['href']}", 'get').content
                        for element in self.get_raa_elements(sub_sub_page_content):
                            elements.append(element)
        #selection des "spécials"
        for div in soup.select("div.fr-card.fr-card--horizontal.fr-card--sm.fr-enlarge-link.fr-mb-3w"):
            for a in div.select("div.fr-card__body div.fr-card__content h2.fr-card__title a"):
                print(a)
                search_pattern=re.search('(?<=Publié le).*',f'{a.parent.parent.get_text()}')
                if search_pattern:
                    if Attrap.guess_date(search_pattern[0], '([0-9]{4}).*').year>=self.not_before.year:
                        page_content = self.get_page(f"{self.__HOST}{a['href']}", 'get').content
                        for sub_page in self.get_sub_pages(page_content,
            'div.fr-card__body div.fr-card__content h2.fr-card__title a',
            self.__HOST,
            True):
                            sub_page_content = self.get_page(sub_page['url'], 'get').content
                            for element in self.get_raa_elements(sub_page_content):
                                elements.append(element)
        # Note: ocrmypdf's --invalidate-digital-signatures option fails on my
        # Ubuntu 20.04 (to be tested on Arch soon), even though it is documented
        # at https://ocrmypdf.readthedocs.io/en/latest/pdfsecurity.html
        self.parse_raa(elements, keywords)
        self.mailer()

    def get_raa_elements(self, page_content):
        elements = []
        # Load the parser
        soup = BeautifulSoup(page_content, 'html.parser')

        # For each a tag, check whether it links to a PDF and, if so, parse it.
        # Note: the page's markup appears malformed (the class attribute gets
        # swallowed into the id attribute), so we match on that id value.
        for a in soup.find_all('a', {'id': 'class="fr-link'}):
            if a.get('href') and a['href'].endswith('.pdf'):
                if a['href'].startswith('/'):
                    url = f"{self.__HOST}{a['href']}"
                else:
                    url = a['href']

                url = unquote(url)
                name = a.find('span').previous_sibling.replace('Télécharger ', '').strip()
                date = datetime.datetime.strptime(a.find('span').get_text().split(' - ')[-1].strip(), '%d/%m/%Y')
                raa = Attrap.RAA(url, date, name)
                self.download_file(raa)
                elements.append(raa)
        return elements
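

# A minimal usage sketch, assuming the usual Attrap conventions; the data
# directory and keyword list below are illustrative, not part of the project.
if __name__ == '__main__':
    pref = Attrap_pref12('./data')
    pref.get_raa(['vidéoprotection'])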