import os, sys, re
import datetime
import logging

from bs4 import BeautifulSoup
from urllib.parse import unquote

from RAAspotter import RAAspotter

logger = logging.getLogger(__name__)

class RAAspotter_pref38(RAAspotter):
  """Scraper for the RAA (recueils des actes administratifs) published by
  the préfecture de l'Isère.

  Fetches the yearly listing pages, asks the server for the details of
  every RAA whose guessed date falls inside the analysis window
  (``self.not_before``), and hands the resulting ``RAAspotter.RAA``
  objects to the parent class for keyword parsing.
  """

  # Configuration: one listing page per year, keyed by the year as a
  # string. Keep newest year first — pages are fetched in dict order.
  __HOST       = 'https://www.isere.gouv.fr'
  __RAA_PAGE   = {'2024': f'{__HOST}/Publications/RAA-Recueil-des-actes-administratifs/Recueils-des-Actes-Administratifs-de-la-prefecture-de-l-Isere-2024',
                  '2023': f'{__HOST}/Publications/RAA-Recueil-des-actes-administratifs/Recueils-des-Actes-Administratifs-de-la-prefecture-de-l-Isere-2023',
                  '2022': f'{__HOST}/Publications/RAA-Recueil-des-actes-administratifs/Archives/Recueils-des-Actes-Administratifs-de-la-prefecture-de-l-Isere-2022',
                  '2021': f'{__HOST}/Publications/RAA-Recueil-des-actes-administratifs/Archives/Archives-des-recueils-des-actes-administratifs-de-la-prefecture-de-l-Isere-2021/Recueils-des-Actes-Administratifs-de-la-prefecture-de-l-Isere-2021',
                  '2020': f'{__HOST}/Publications/RAA-Recueil-des-actes-administratifs/Archives/Archives-des-recueils-des-actes-administratifs-de-la-prefecture-de-l-Isere-2020/Recueils-des-Actes-Administratifs-de-la-Prefecture-de-l-Isere-2020',
                  '2019': f'{__HOST}/Publications/RAA-Recueil-des-actes-administratifs/Archives/Archives-des-Recueils-des-Actes-Administratifs-de-la-prefecture-de-l-Isere-2019/Archives-des-Recueils-des-Actes-Administratifs-de-la-prefecture-de-l-Isere-2019'}
  __USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/115.0'
  full_name = 'Préfecture de l\'Isère'
  short_code = 'pref38'

  def __init__(self, data_dir):
    super().__init__(data_dir, self.__USER_AGENT)
    self.enable_tor(20)

  def get_raa(self, keywords):
    """Fetch and parse every relevant yearly RAA page, then send the
    notification mail.

    keywords: comma-separated string of search terms.
    """
    self.print_output('RAAspotter_pref38')
    self.print_output(f'Termes recherchés: {keywords}')
    self.print_output('')

    # Only fetch the yearly pages that can contain documents newer than
    # self.not_before. Dict order (newest first) is preserved, so the
    # pages are processed in the same order as before.
    pages_to_parse = [url for year, url in self.__RAA_PAGE.items()
                      if self.not_before.year <= int(year)]

    for raa_page in pages_to_parse:
      page_content = self.get_page(raa_page, 'get').content
      raa_elements = self.get_raa_elements(page_content, raa_page)
      self.parse_raa(raa_elements, keywords.split(','))
    self.mailer()

  def get_raa_elements(self, page_content, raa_page):
    """Extract the RAA documents listed on a yearly page.

    page_content: raw HTML of the yearly listing page.
    raa_page: URL of that page — also the target of the POST requests
      that fetch the details of each individual RAA.
    Returns a list of RAAspotter.RAA objects.
    """
    elements = []
    soup = BeautifulSoup(page_content, 'html.parser')

    # The RAA list is exposed as a <select>; every non-empty <option>
    # is one RAA. Guard against a missing element instead of letting a
    # bare IndexError escape.
    select_list = soup.select_one('select#-liste-docs')
    if select_list is None:
      logger.warning(f'No document list found on {raa_page}')
      return elements

    for option in select_list.find_all('option'):
      if option['value'] == "":
        continue

      # Guess the date from the option title so RAA older than the
      # analysis window are skipped without an extra server round-trip.
      # NOTE(review): assumes guess_date returns a comparable datetime
      # for every title matching this pattern — confirm in RAAspotter.
      guessed_date = RAAspotter.guess_date(option['title'], '.* n°[ 0-9]* du ([0-9]*(?:er)? [a-zéû]* [0-9]*)')
      if guessed_date < self.not_before:
        continue

      # Ask the server for the details of this RAA, then parse them.
      # Use dedicated names: rebinding page_content/soup here would
      # shadow the listing page currently being iterated.
      details_content = self.get_page(raa_page, 'post', {'-liste-docs': option['value']}).content
      details_soup = BeautifulSoup(details_content, 'html.parser')
      a = details_soup.select_one('div.liste_deroulante a.fr-link.fr-link--download')

      # A link ending in .pdf means we really got the details of the
      # requested RAA, so we can build the RAA object.
      if a is not None and a.get('href') and a['href'].endswith('.pdf'):
        if a['href'].startswith('/'):
          url = f"{self.__HOST}{a['href']}"
        else:
          url = a['href']

        url      = unquote(url)
        # Link text shape: "Télécharger <name>" followed by a <span>
        # whose text ends with " - dd/mm/YYYY".
        name     = a.find('span').previous_sibling.replace('Télécharger ', '').strip()
        date     = datetime.datetime.strptime(a.find('span').get_text().split(' - ')[-1].strip(), '%d/%m/%Y')
        filename = url.split('/')[-1]

        elements.append(RAAspotter.RAA(url, date, name, filename))
    return elements