Forked from La Quadrature du Net / Attrap
Bastien Le Querrec authored
First step of multi-search support
RAAspotter_pref976.py 4.95 KiB
import os
import datetime

from bs4 import BeautifulSoup
from urllib.parse import unquote

from RAAspotter import RAAspotter


class RAAspotter_pref976(RAAspotter):

    # Config
    __HOST = 'https://www.mayotte.gouv.fr'
    __RAA_PAGE = {
        'default': f'{__HOST}/Publications/Recueil-des-actes-administratifs-R.A.A',
        '2024': f'{__HOST}/Publications/Recueil-des-actes-administratifs-R.A.A/RAA-2024',
        '2023': f'{__HOST}/Publications/Recueil-des-actes-administratifs-R.A.A/RAA-2023',
        '2022': f'{__HOST}/Publications/Recueil-des-actes-administratifs-R.A.A/RAA-2022',
        '2021': f'{__HOST}/Publications/Recueil-des-actes-administratifs-R.A.A/RAA-2021',
        '2020': f'{__HOST}/Publications/Recueil-des-actes-administratifs-R.A.A/RAA-2020',
        '2019': f'{__HOST}/Publications/Recueil-des-actes-administratifs-R.A.A/RAA-2019'
    }
    __USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36'
    full_name = 'Préfecture de Mayotte'
    short_code = 'pref976'

    def __init__(self, data_dir):
        super().__init__(data_dir, self.__USER_AGENT)
        self.enable_tor(10)
    def get_raa(self, keywords):
        self.print_output('RAAspotter_pref976')
        self.print_output(f'Termes recherchés: {keywords}')
        self.print_output('')

        # Only keep the year pages that can contain RAAs from the
        # analysed period
        pages_to_parse = []
        for year in ['2024', '2023', '2022', '2021', '2020', '2019']:
            if self.not_before.year <= int(year):
                pages_to_parse.append(self.__RAA_PAGE[year])

        sub_pages_to_parse = [self.__RAA_PAGE['default']]

        # For each year, look up the month sub-pages
        for raa_page in pages_to_parse:
            page_content = self.get_page(raa_page, 'get').content
            month_pages = self.get_sub_pages(
                page_content,
                '.fr-card.fr-card--sm.fr-card--grey.fr-enlarge-link div.fr-card__body div.fr-card__content h2.fr-card__title a',
                self.__HOST,
                False
            )[::-1]

            # Also check whether the year page itself lists a
            # miscategorised RAA
            for page_to_parse in self.find_raa_card(raa_page):
                sub_pages_to_parse.append(page_to_parse)

            # For each month, look up the RAA pages
            for month_page in month_pages:
                year = RAAspotter.guess_date(month_page['name'], '(.*)').year
                for page_to_parse in self.find_raa_card(
                    month_page['url'],
                    year
                ):
                    sub_pages_to_parse.append(page_to_parse)

        # Parse the pages that contain RAAs
        for page in sub_pages_to_parse:
            page_content = self.get_page(page, 'get').content
            raa_elements = self.get_raa_elements(page_content)
            self.parse_raa(raa_elements, keywords.split(','))
        self.mailer()
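
    # Overall crawl shape, as implemented above: year pages -> month
    # sub-pages -> RAA card pages (plus the 'default' page), each of
    # which get_raa_elements() then scans for PDF links.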
    def find_raa_card(self, page, year=None):
        pages = []
        card_pages = self.get_sub_pages_with_pager(
            page,
            'div.fr-card__body div.fr-card__content h2.fr-card__title a.fr-card__link',
            'ul.fr-pagination__list li a.fr-pagination__link.fr-pagination__link--next',
            None,
            self.__HOST
        )[::-1]
        for card_page in card_pages:
            # Filter out the RAA pages that do not fall within the
            # analysed period
            guessed_date = RAAspotter.guess_date(
                card_page['name'],
                'n°[ 0-9]* du ([0-9]*(?:er)? [a-zéû]* [0-9]*)'
            )
            if year:
                guessed_date = guessed_date.replace(year=year)
            if guessed_date >= self.not_before:
                pages.append(card_page['url'])
        return pages
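
    # Note on the date pattern used in find_raa_card above: card titles
    # look like « RAA n° 12 du 1er mars 2024 » (an illustrative title,
    # not one taken from the site), and the captured group is the French
    # date ('1er mars 2024'), which RAAspotter.guess_date then turns
    # into a datetime for comparison with not_before.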
    def get_raa_elements(self, page_content):
        elements = []
        # Load the parser
        soup = BeautifulSoup(page_content, 'html.parser')

        # Fetch every matching <a> tag that points to a PDF
        for a in soup.select('a.fr-link.fr-link--download'):
            if a.get('href') and a['href'].endswith('.pdf'):
                if a['href'].startswith('/'):
                    url = f"{self.__HOST}{a['href']}"
                else:
                    url = a['href']

                url = unquote(url)
                name = a.find('span').previous_sibling.replace('Télécharger ', '').strip()
                date = datetime.datetime.strptime(
                    a.find('span').get_text().split(' - ')[-1].strip(),
                    '%d/%m/%Y'
                )

                raa = RAAspotter.RAA(url, date, name)
                elements.append(raa)
        return elements
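
A minimal usage sketch (not part of the file above; the data directory and keywords are illustrative). The class takes a data_dir, and get_raa splits its argument on commas, so several search terms can be passed in a single string:

    from RAAspotter_pref976 import RAAspotter_pref976

    spotter = RAAspotter_pref976('data/pref976')
    spotter.get_raa('vidéosurveillance,drone')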