import datetime

from bs4 import BeautifulSoup
from urllib.parse import unquote

from RAAspotter import RAAspotter


class RAAspotter_pref976(RAAspotter):
    # Config
    __HOST = 'https://www.mayotte.gouv.fr'
    __RAA_PAGE = {
        'default': f'{__HOST}/Publications/Recueil-des-actes-administratifs-'
                   'R.A.A',
        '2024': f'{__HOST}/Publications/Recueil-des-actes-administratifs-R.A.A'
                '/RAA-2024',
        '2023': f'{__HOST}/Publications/Recueil-des-actes-administratifs-R.A.A'
                '/RAA-2023',
        '2022': f'{__HOST}/Publications/Recueil-des-actes-administratifs-R.A.A'
                '/RAA-2022',
        '2021': f'{__HOST}/Publications/Recueil-des-actes-administratifs-R.A.A'
                '/RAA-2021',
        '2020': f'{__HOST}/Publications/Recueil-des-actes-administratifs-R.A.A'
                '/RAA-2020',
        '2019': f'{__HOST}/Publications/Recueil-des-actes-administratifs-R.A.A'
                '/RAA-2019'
    }
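    # e.g. __RAA_PAGE['2024'] resolves to
    # 'https://www.mayotte.gouv.fr/Publications/Recueil-des-actes-administratifs-R.A.A/RAA-2024'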

    __USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 ' \
                   '(KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36'

    full_name = 'Préfecture de Mayotte'
    short_code = 'pref976'

    def __init__(self, data_dir):
        super().__init__(data_dir, self.__USER_AGENT)
        self.enable_tor(10)
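        # Assumption: enable_tor comes from the RAAspotter base class and
        # routes requests through Tor; the argument presumably bounds how
        # many requests are made before the circuit is renewed.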

    def get_raa(self, keywords):
        self.print_output('RAAspotter_pref976')
        self.print_output(f'Termes recherchés: {keywords}')
        self.print_output('')

        pages_to_parse = []
        if self.not_before.year <= 2024:
            pages_to_parse.append(self.__RAA_PAGE['2024'])
        if self.not_before.year <= 2023:
            pages_to_parse.append(self.__RAA_PAGE['2023'])
        if self.not_before.year <= 2022:
            pages_to_parse.append(self.__RAA_PAGE['2022'])
        if self.not_before.year <= 2021:
            pages_to_parse.append(self.__RAA_PAGE['2021'])
        if self.not_before.year <= 2020:
            pages_to_parse.append(self.__RAA_PAGE['2020'])
        if self.not_before.year <= 2019:
            pages_to_parse.append(self.__RAA_PAGE['2019'])

        sub_pages_to_parse = [self.__RAA_PAGE['default']]

        # For each year, look for the month sub-pages
        for raa_page in pages_to_parse:
            page_content = self.get_page(raa_page, 'get').content
            month_pages = self.get_sub_pages(
                page_content,
                '.fr-card.fr-card--sm.fr-card--grey.fr-enlarge-link '
                'div.fr-card__body div.fr-card__content '
                'h2.fr-card__title a',
                self.__HOST,
                False
            )[::-1]

            # Also check whether the year page itself hosts a
            # miscategorised RAA
            for page_to_parse in self.find_raa_card(raa_page):
                sub_pages_to_parse.append(page_to_parse)

            # For each month, look for the RAA pages
            for month_page in month_pages:
                year = RAAspotter.guess_date(month_page['name'], '(.*)').year
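                # Assumption: month cards are titled with a parsable French
                # date (e.g. "Janvier 2024", hypothetical), so the catch-all
                # pattern '(.*)' lets guess_date recover the year for RAA
                # card titles that omit it.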
                for page_to_parse in self.find_raa_card(
                    month_page['url'],
                    year
                ):
                    sub_pages_to_parse.append(page_to_parse)

        # Parse the pages that contain RAAs
        for page in sub_pages_to_parse:
            page_content = self.get_page(page, 'get').content
            raa_elements = self.get_raa_elements(page_content)
            self.parse_raa(raa_elements, keywords.split(','))
        self.mailer()

    def find_raa_card(self, page, year=None):
        pages = []
        card_pages = self.get_sub_pages_with_pager(
            page,
            'div.fr-card__body div.fr-card__content h2.fr-card__title '
            'a.fr-card__link',
            'ul.fr-pagination__list li '
            'a.fr-pagination__link.fr-pagination__link--next',
            self.__HOST
        )[::-1]
        for card_page in card_pages:
            # Skip the RAA pages that fall outside the analysed period
            guessed_date = RAAspotter.guess_date(
                card_page['name'],
                'n°[ 0-9]* du ([0-9]*(?:er)? [a-zéû]* [0-9]*)'
            )
            if year:
                guessed_date = guessed_date.replace(year=year)
            if guessed_date >= self.not_before:
                pages.append(card_page['url'])
        return pages
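    # Illustrative example for the pattern used above (hypothetical card
    # title): "RAA n° 12 du 1er février 2024" captures "1er février 2024",
    # which RAAspotter.guess_date is expected to turn into a datetime
    # comparable with self.not_before.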

    def get_raa_elements(self, page_content):
        elements = []
        # Load the parser
        soup = BeautifulSoup(page_content, 'html.parser')

        # Collect each <a> tag and keep those pointing at a PDF
        for a in soup.select('a.fr-link.fr-link--download'):
            if a.get('href') and a['href'].endswith('.pdf'):
                if a['href'].startswith('/'):
                    url = f"{self.__HOST}{a['href']}"
                else:
                    url = a['href']

                url = unquote(url)
                name = a.find('span').previous_sibling.replace(
                    'Télécharger ',
                    ''
                ).strip()
                date = datetime.datetime.strptime(
                    a.find('span').get_text().split(' - ')[-1].strip(),
                    '%d/%m/%Y'
                )
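                # Assumption about the markup: the anchor text looks like
                # "Télécharger <name> <span>PDF - 1,2 Mo - 01/02/2024</span>",
                # so the name is the text node before the <span> and the date
                # is the last " - "-separated field inside it.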
                filename = url.split('/')[-1]

                raa = RAAspotter.RAA(url, date, name, filename)
                elements.append(raa)
        return elements
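

# Usage sketch (illustrative; not part of the original module). The
# `not_before` attribute and the standalone invocation are assumptions
# about the RAAspotter base class; the real project likely wires this
# scraper up through its own entry point.
if __name__ == '__main__':
    spotter = RAAspotter_pref976('./data/pref976')  # hypothetical data dir
    spotter.not_before = datetime.datetime(2024, 1, 1)  # assumed attribute
    spotter.get_raa('délibération,arrêté')  # comma-separated keywords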