From 4522c74502ce8b36d62768aea9fddfdd5000916d Mon Sep 17 00:00:00 2001
From: Bastien Le Querrec <blq@laquadrature.net>
Date: Mon, 6 Jan 2025 19:41:51 +0100
Subject: [PATCH] pref13: use prefdpt

---
 Attrap_pref13.py | 64 +++++-------------------------------------------
 1 file changed, 6 insertions(+), 58 deletions(-)
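
Note: a minimal usage sketch of the refactored class, assuming Attrap_prefdpt keeps
the public interface the removed code relied on (a constructor taking data_dir and a
get_raa(keywords) method inherited from Attrap); the data directory and keyword below
are hypothetical examples, not part of this patch.

    # Hypothetical usage, assuming the inherited Attrap/Attrap_prefdpt interface.
    from Attrap_pref13 import Attrap_pref13

    pref = Attrap_pref13('data/pref13')   # placeholder data directory
    pref.get_raa(['vidéoprotection'])     # search the fetched RAAs for this keyword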

diff --git a/Attrap_pref13.py b/Attrap_pref13.py
index 758519b..a5c32ad 100644
--- a/Attrap_pref13.py
+++ b/Attrap_pref13.py
@@ -1,70 +1,18 @@
-import os
-import datetime
+from Attrap_prefdpt import Attrap_prefdpt
 
-from bs4 import BeautifulSoup
-from urllib.parse import unquote
 
-from Attrap import Attrap
+class Attrap_pref13(Attrap_prefdpt):
 
-
-class Attrap_pref13(Attrap):
-
-    # Config
+    # Prefecture configuration
     hostname = 'https://www.bouches-du-rhone.gouv.fr'
     raa_page = [
         f'{hostname}/Publications/RAA-et-Archives',
         f'{hostname}/Publications/RAA-et-Archives/Archives-RAA-des-Bouches-du-Rhone'
     ]
-    user_agent = 'Mozilla/5.0 (Windows NT 10.0; rv:109.0) Gecko/20100101 Firefox/115.0'
     full_name = 'Préfecture des Bouches-du-Rhône'
     short_code = 'pref13'
     timezone = 'Europe/Paris'
 
-    def __init__(self, data_dir):
-        super().__init__(data_dir, self.user_agent)
-        self.set_sleep_time(30)
-
-    def get_raa(self, keywords):
-        pages_to_parse = []
-        for raa_page in self.raa_page:
-            page_content = self.get_page(raa_page, 'get').content
-            year_pages = self.get_sub_pages(
-                page_content,
-                '.fr-card.fr-card--sm.fr-card--grey.fr-enlarge-link div.fr-card__body div.fr-card__content h2.fr-card__title a',
-                self.hostname,
-                False
-            )
-            for year_page in year_pages:
-                year = Attrap.guess_date(year_page['name'], '.*([0-9]{4})').year
-                if year >= self.not_before.year and year < 9999:
-                    pages_to_parse.append(year_page['url'])
-
-        elements = []
-        for page_to_parse in pages_to_parse:
-            page_content = self.get_page(page_to_parse, 'get').content
-            for element in self.get_raa_elements(page_content):
-                elements.append(element)
-
-        self.parse_raa(elements, keywords)
-        self.mailer()
-
-    def get_raa_elements(self, page_content):
-        elements = []
-        # On charge le parser
-        soup = BeautifulSoup(page_content, 'html.parser')
-
-        # Pour chaque balise a, on regarde si c'est un PDF, et si oui on le parse
-        for a in soup.select('a.fr-link.fr-link--download'):
-            if a.get('href') and a['href'].endswith('.pdf'):
-                if a['href'].startswith('/'):
-                    url = f"{self.hostname}{a['href']}"
-                else:
-                    url = a['href']
-
-                url = unquote(url)
-                name = a.find('span').previous_sibling.replace('Télécharger ', '').strip()
-                date = datetime.datetime.strptime(a.find('span').get_text().split(' - ')[-1].strip(), '%d/%m/%Y')
-
-                raa = Attrap.RAA(url, date, name, timezone=self.timezone)
-                elements.append(raa)
-        return elements
+    # Configuration of the widgets to parse
+    Attrap_prefdpt.grey_card['regex']['year'] = 'RAA[- ]*([0-9]{4})'
+    Attrap_prefdpt.grey_card['follow_link_on_unrecognised_date'] = False
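
A small illustration of the year regex set above, run against hypothetical card
titles (the titles are made up; only the pattern comes from the patch):

    import re

    # 'RAA[- ]*([0-9]{4})' captures a four-digit year after "RAA", with or
    # without a separating space or dash; other titles yield no match.
    for title in ['RAA 2024', 'RAA-2019', 'Recueil des actes administratifs']:
        m = re.search('RAA[- ]*([0-9]{4})', title)
        print(title, '->', m.group(1) if m else 'no year found')
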
-- 
GitLab