from bs4 import BeautifulSoup
from urllib.parse import unquote

from RAAspotter import RAAspotter


class RAAspotter_ppparis(RAAspotter):

    # Config
    __RAA_PAGE = 'https://www.prefecturedepolice.interieur.gouv.fr/actualites-et-presse/arretes/accueil-arretes'
    __WAIT_ELEMENT = 'block-decree-list-block'
    __USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36'

    def __init__(self, data_dir):
        super().__init__(data_dir)
        self.user_agent = self.__USER_AGENT

    def get_raa(self, keywords):
        self.print_output('RAAspotter_ppparis')
        self.print_output(f'Termes recherchés: {keywords}')

        page_content = self.get_session()
        raa_elements = self.get_raa_elements(page_content)
        self.parse_raa(raa_elements, keywords.split(','))

    def get_raa_elements(self, page_content):
        elements = []

        # Load the HTML parser
        soup = BeautifulSoup(page_content, 'html.parser')

        # For each <a> tag, check whether it links to a PDF; if so, parse it
        for a in soup.find_all('a', href=True):
            if a['href'].endswith('.pdf'):
                # Resolve relative links against the prefecture's domain
                if a['href'].startswith('/'):
                    url = 'https://www.prefecturedepolice.interieur.gouv.fr' + a['href']
                else:
                    url = a['href']

                name = a.find('span').get_text()
                date = a.find('div', class_='field--type-datetime').get_text()
                filename = unquote(url.split('/')[-1])

                raa = RAAspotter.RAA(url, date, name, filename)
                elements.append(raa)
        return elements

    def get_session(self):
        return super().get_session(self.__RAA_PAGE, self.__WAIT_ELEMENT)
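

# --- Hypothetical usage sketch (not part of the original module) ---
# Assuming the RAAspotter base class provides print_output(), parse_raa() and a
# browser-backed get_session() as the calls above suggest, the scraper could be
# driven like this; the data directory and keywords below are made-up examples.
if __name__ == '__main__':
    spotter = RAAspotter_ppparis('./data/ppparis')
    # get_raa() expects a comma-separated string of search terms
    spotter.get_raa('vidéoprotection,caméras')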