Skip to content
Extraits de code Groupes Projets
Sélectionner une révision Git
  • fcf68e545c25bb87b29eba631705a07732f7a6fd
  • main par défaut protégée
2 résultats

Attrap_pref62.py

Blame
  • Attrap_pref62.py 2,64 Kio
    import os
    import datetime
    
    from bs4 import BeautifulSoup
    from urllib.parse import unquote
    
    from Attrap import Attrap
    
    
    class Attrap_pref62(Attrap):
        """Scraper for the RAA (recueil des actes administratifs) pages of the
        Pas-de-Calais prefecture website.

        Crawls the yearly listing pages, extracts every PDF link as an
        ``Attrap.RAA`` element and delegates parsing/notification to the
        ``Attrap`` base class.
        """

        # Config
        __HOST = 'https://www.pas-de-calais.gouv.fr'
        __RAA_PAGE = f'{__HOST}/Publications/Recueil-des-actes-administratifs'
        __USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; rv:109.0) Gecko/20100101 Firefox/115.0'
        full_name = 'Préfecture du Pas-de-Calais'
        short_code = 'pref62'

        def __init__(self, data_dir):
            """Initialise the scraper.

            data_dir: directory where downloaded data is stored (passed through
                to the ``Attrap`` base class).
            """
            super().__init__(data_dir, self.__USER_AGENT)
            # Throttle requests: 30 s between hits to the prefecture website.
            self.set_sleep_time(30)

        def get_raa(self, keywords):
            """Collect all relevant RAA elements and run them through the
            base-class pipeline (``parse_raa`` then ``mailer``).

            keywords: search keywords forwarded to ``parse_raa``.
            """
            # Determine which yearly pages to parse: keep only the years that
            # are not older than self.not_before.
            year_pages = self.get_sub_pages_with_pager(
                self.__RAA_PAGE,
                'div.fr-card.fr-card--horizontal.fr-card--sm.fr-enlarge-link.fr-mb-3w div.fr-card__body div.fr-card__content h2.fr-card__title a.fr-card__link',
                'ul.fr-pagination__list li a.fr-pagination__link.fr-pagination__link--next.fr-pagination__link--lg-label',
                'div.fr-card.fr-card--horizontal.fr-card--sm.fr-enlarge-link.fr-mb-3w div.fr-card__body div.fr-card__content div.fr-card__end p.fr-card__detail',
                self.__HOST
            )
            pages_to_parse = [
                year_page['url']
                for year_page in year_pages
                if Attrap.guess_date(year_page['name'].strip(), '([0-9]{4}).*').year >= self.not_before.year
            ]

            elements = []
            for raa_page in pages_to_parse:
                page_content = self.get_page(raa_page, 'get').content
                elements.extend(self.get_raa_elements(page_content))

            self.parse_raa(elements, keywords)
            self.mailer()

        def get_raa_elements(self, page_content):
            """Parse one listing page and return its ``Attrap.RAA`` elements.

            page_content: raw HTML of the listing page.
            Returns the elements in reverse page order; returns an empty list
            when the page has no downloads section (layout change or empty
            page) instead of raising IndexError.
            """
            # Load the parser
            soup = BeautifulSoup(page_content, 'html.parser')

            # The RAA list lives inside the bordered downloads group.
            cards = soup.select_one('div.fr-downloads-group.fr-downloads-group--bordered')
            if cards is None:
                return []

            elements = []
            # Inspect every <a> tag inside that div.
            for a in cards.find_all('a', href=True):
                href = a['href']
                if not href.endswith('.pdf'):
                    continue
                # Relative links are rooted at the prefecture host.
                url = unquote(f'{self.__HOST}{href}' if href.startswith('/') else href)

                span = a.find('span')
                # Link text is "Télécharger <name>" followed by a <span> whose
                # text ends with " - dd/mm/yyyy".
                name = span.previous_sibling.replace('Télécharger ', '').strip()
                date = datetime.datetime.strptime(span.get_text().split(' - ')[-1].strip(), '%d/%m/%Y')

                elements.append(Attrap.RAA(url, date, name))
            # Reverse the on-page order (presumably newest-first, so elements
            # come out oldest-first — TODO confirm against the live page).
            return elements[::-1]