From 24ea416cb6a7e51a8e26e3b9550e8ad52cf082f1 Mon Sep 17 00:00:00 2001
From: Aurel <aurel.pere@ikmail.com>
Date: Fri, 7 Jun 2024 16:01:23 +0200
Subject: [PATCH] Add Attrap_pref47; update cli.py and Makefile; comment out
 pager crawling for years before 2020

---
 Attrap_pref47.py | 116 ++++++++++++++++++++++++++++++++++++++++++++++
 Makefile         |   4 +-
 cli.py           |   1 +
 3 files changed, 120 insertions(+), 1 deletion(-)
 create mode 100644 Attrap_pref47.py

diff --git a/Attrap_pref47.py b/Attrap_pref47.py
new file mode 100644
index 0000000..209fec9
--- /dev/null
+++ b/Attrap_pref47.py
@@ -0,0 +1,116 @@
+import datetime
+
+from bs4 import BeautifulSoup
+from urllib.parse import unquote
+
+from Attrap import Attrap
+
+
+class Attrap_pref47(Attrap):
+
+    # Config
+    __HOST = 'https://www.lot-et-garonne.gouv.fr'
+    __RAA_PAGE = f'{__HOST}/Publications/Publications-legales/RAA'
+    __USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/115.0'
+    full_name = 'Préfecture du Lot-et-Garonne'
+    short_code = 'pref47'
+
+    def __init__(self, data_dir):
+        super().__init__(data_dir, self.__USER_AGENT)
+        self.enable_tor(10)
+        self.not_before = datetime.datetime(2020, 1, 1)  # cutoff year: more recent years have no pager
+
+    # Overridden to handle CSS classes containing whitespace
+    def get_sub_pages_with_pager(self, page, sub_page_element, pager_element, details_element, host):
+        pages = []
+        page_content = self.get_page(page, 'get').content
+
+        # Initialise the parser
+        soup = BeautifulSoup(page_content, 'html.parser')
+
+        # Look for the sub-pages
+        sub_pages = soup.select(sub_page_element)
+        sub_pages_details = None
+        if details_element is not None:
+            sub_pages_details = soup.select(details_element)
+        i = 0
+        for sub_page in sub_pages:
+            if sub_page.get('href'):
+                page = {
+                    'url': f"{host}{sub_page['href']}",
+                    'name': sub_page.get_text().strip(),
+                    'details': ''
+                }
+                if details_element is not None:
+                    page['details'] = sub_pages_details[i].get_text().strip()
+                pages.append(page)
+            i = i + 1
+
+        # Look for a pager and, if one is found, follow it.
+        # Changed here: pager_element must contain only the value of
+        # the class attribute, not the element, so that classes
+        # containing whitespace are matched correctly.
+        pager = soup.find_all('a', class_=pager_element)
+        if pager and pager[0].get('href'):
+            for sub_page in self.get_sub_pages_with_pager(
+                f"{host}{pager[0]['href']}",
+                sub_page_element,
+                pager_element,
+                details_element,
+                host
+            ):
+                pages.append(sub_page)
+
+        return pages
+
+    def get_raa(self, keywords):
+        elements = []
+        page_content = self.get_page(self.__RAA_PAGE, 'get').content
+        soup = BeautifulSoup(page_content, 'html.parser')
+        for a in soup.select('div.fr-card__body div.fr-card__content h2.fr-card__title a'):
+            # Years without a pager
+            if Attrap.guess_date(f'{self.__HOST}{a.get_text().strip()}', '([0-9]{4}).*').year >= self.not_before.year:
+                page_content = self.get_page(f"{self.__HOST}{a['href']}", 'get').content
+                for sub_page in self.get_sub_pages(page_content,
+                                                   'div.fr-card__body div.fr-card__content h2.fr-card__title a',
+                                                   self.__HOST,
+                                                   True):
+                    sub_page_content = self.get_page(sub_page['url'], 'get').content
+                    for element in self.get_raa_elements(sub_page_content):
+                        elements.append(element)
+            # RAA from 2019 and earlier use pagers
+            # else:
+            #     page_content = self.get_page(f"{self.__HOST}{a['href']}", 'get').content
+            #     for sub_page in self.get_sub_pages_with_pager(f"{self.__HOST}{a['href']}", 'div.fr-card__body div.fr-card__content h2.fr-card__title a', 'fr-pagination__link fr-pagination__link--next fr-pagination__link--lg-label', None, self.__HOST):
+            #         sub_page_content = self.get_page(sub_page['url'], 'get').content
+            #         for element in self.get_raa_elements(sub_page_content):
+            #             elements.append(element)
+
+        # ocrmypdf bug on my Ubuntu 20.04 (will test on Arch soon) with
+        # --invalidate-digital-signatures, even though it is documented:
+        # https://ocrmypdf.readthedocs.io/en/latest/pdfsecurity.html
+        self.parse_raa(elements, keywords)
+        self.mailer()
+
+    def get_raa_elements(self, page_content):
+        elements = []
+        # Load the parser
+        soup = BeautifulSoup(page_content, 'html.parser')
+
+        # For each a tag whose id attribute is the literal string
+        # 'class="fr-link' (an artifact of the site's malformed markup),
+        # check whether it links to a PDF and, if so, parse it
+        for a in soup.find_all('a', {'id': 'class="fr-link'}):
+            if a.get('href') and a['href'].endswith('.pdf'):
+                if a['href'].startswith('/'):
+                    url = f"{self.__HOST}{a['href']}"
+                else:
+                    url = a['href']
+
+                url = unquote(url)
+                name = a.find('span').previous_sibling.replace('Télécharger ', '').strip()
+                date = datetime.datetime.strptime(a.find('span').get_text().split(' - ')[-1].strip(), '%d/%m/%Y')
+                raa = Attrap.RAA(url, date, name)
+                self.download_file(raa)
+                elements.append(raa)
+        return elements
diff --git a/Makefile b/Makefile
index dbbd7dd..54f6736 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-make: ppparis pref04 pref05 pref06 pref09 pref13 pref31 pref33 pref34 pref35 pref38 pref42 pref44 pref59 pref62 pref63 pref64 pref65 pref66 pref69 pref80 pref81 pref83 pref87 pref93 pref976
+make: ppparis pref04 pref05 pref06 pref09 pref13 pref31 pref33 pref34 pref35 pref38 pref42 pref44 pref47 pref59 pref62 pref63 pref64 pref65 pref66 pref69 pref80 pref81 pref83 pref87 pref93 pref976
 ppparis:
 	bin/python3 cli.py ppparis
 pref04:
@@ -25,6 +25,8 @@ pref42:
 	bin/python3 cli.py pref42
 pref44:
 	bin/python3 cli.py pref44
+pref47:
+	bin/python3 cli.py pref47
 pref59:
 	bin/python3 cli.py pref59
 pref62:
diff --git a/cli.py b/cli.py
index 275bb16..8191d43 100755
--- a/cli.py
+++ b/cli.py
@@ -52,6 +52,7 @@ available_administrations = [
     'pref38',
     'pref42',
     'pref44',
+    'pref47',
     'pref59',
     'pref62',
     'pref63',
-- 
GitLab
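
Note on the pager_element change: a string passed to soup.select() is a
CSS selector, in which whitespace means "descendant", so a multi-class
attribute value cannot be passed through select() as a single string.
By contrast, find_all(class_=...) can match the class attribute's exact
string value, whitespace included. A minimal sketch of the difference
(the HTML here is illustrative, not taken from the prefecture site):

    from bs4 import BeautifulSoup

    html = '<a class="fr-pagination__link fr-pagination__link--next" href="/page2">Suivant</a>'
    soup = BeautifulSoup(html, 'html.parser')

    # As a CSS selector, the space means "descendant", so this finds nothing:
    print(soup.select('a.fr-pagination__link fr-pagination__link--next'))  # []

    # class_ matches the exact, whitespace-separated attribute value:
    print(soup.find_all('a', class_='fr-pagination__link fr-pagination__link--next'))
    # [<a class="fr-pagination__link fr-pagination__link--next" href="/page2">Suivant</a>]

Chaining the classes in the selector, as in
soup.select('a.fr-pagination__link.fr-pagination__link--next'), would
also match, and unlike the exact-string form it does not depend on the
order of the classes in the attribute.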
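
Note on the find_all('a', {'id': 'class="fr-link'}) call in
get_raa_elements(): the most plausible explanation for that id value is
malformed markup in which an empty id= swallows the start of the
following class attribute. A sketch under that assumption (the HTML
below is a hypothetical reconstruction, not copied from the site):

    from bs4 import BeautifulSoup

    # Hypothetical broken markup: with no value after id=, html.parser
    # consumes the next run of non-whitespace characters as an unquoted
    # value, so id becomes the literal string 'class="fr-link'.
    html = '<a id= class="fr-link fr-link--download" href="/raa.pdf">Télécharger</a>'
    soup = BeautifulSoup(html, 'html.parser')

    print(soup.find('a')['id'])                          # class="fr-link
    print(soup.find_all('a', {'id': 'class="fr-link'}))  # [<a ...>]

Matching on this parsing artifact works, but it will silently stop
matching if the site ever fixes its markup.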