-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathutils.py
More file actions
336 lines (271 loc) · 10.9 KB
/
utils.py
File metadata and controls
336 lines (271 loc) · 10.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
"""Utility functions for parsing Subito.it pages."""
import re
import logging
from typing import List, Optional, Tuple
from bs4 import BeautifulSoup
from config import BASE_URL, PROPERTY_TYPE_MAP, CONTRACT_TYPE_MAP
from models import Property
logger = logging.getLogger(__name__)
def extract_listing_id(url: str) -> Optional[str]:
    """Extract the numeric listing ID from a detail-page URL.

    URLs end with "-{id}.htm"; returns the digit run or None if absent.
    """
    found = re.search(r'-(\d+)\.htm$', url)
    return found.group(1) if found else None
def extract_property_type_from_url(url: str) -> Optional[str]:
    """Map a URL path segment to a property type via PROPERTY_TYPE_MAP."""
    # First map key whose "/{key}/" segment appears in the URL wins.
    return next(
        (value for key, value in PROPERTY_TYPE_MAP.items() if f"/{key}/" in url),
        None,
    )
def extract_contract_type_from_url(url: str) -> Optional[str]:
    """Map a URL path segment to a contract type via CONTRACT_TYPE_MAP."""
    # First map key whose "/{key}/" segment appears in the URL wins.
    return next(
        (value for key, value in CONTRACT_TYPE_MAP.items() if f"/{key}/" in url),
        None,
    )
def parse_price(text: str) -> Optional[str]:
    """Parse a price string, preferring the "<amount> €" portion.

    Falls back to the cleaned raw text when no euro amount is found;
    returns None for empty input.
    """
    if not text:
        return None
    # Normalize non-breaking spaces before matching.
    cleaned = text.replace('\xa0', ' ').strip()
    priced = re.search(r'([\d.,]+)\s*€', cleaned)
    if priced:
        return priced.group(0).strip()
    return cleaned if cleaned else None
def parse_area(text: str) -> Optional[str]:
    """Parse an area value from text such as '70 mq' or '85,5 m²'.

    Returns the number as a string with a dot decimal separator.
    """
    if not text:
        return None
    found = re.search(r'([\d.,]+)\s*m[q²]', text, re.IGNORECASE)
    return found.group(1).replace(',', '.') if found else None
def parse_rooms(text: str) -> Optional[str]:
    """Parse the room count from text such as '3 Locali'."""
    if not text:
        return None
    # Accepts both singular 'Locale' and plural 'Locali', either case.
    found = re.search(r'(\d+)\s*[Ll]ocal[ei]', text)
    return found.group(1) if found else None
def parse_floor(text: str) -> Optional[str]:
    """Parse the floor number from text such as '2° Piano'.

    Ground floor ("piano terra") maps to '0'. Returns None when no
    floor information is present.
    """
    if not text:
        return None
    match = re.search(r'(\d+)[°º]?\s*[Pp]iano', text)
    if match:
        return match.group(1)
    # Ground floor check. Use a word boundary so unrelated words that merely
    # contain "terra" (e.g. "Terrazzo", "Terrazza") are not misread as
    # ground floor — the previous substring test matched those too.
    if re.search(r'\bterra\b', text, re.IGNORECASE):
        return '0'
    return None
def parse_bathrooms(text: str) -> Optional[str]:
    """Parse the bathroom count from text such as '2 Bagni'."""
    if not text:
        return None
    # Accepts singular 'Bagno' and plural 'Bagni', either case.
    found = re.search(r'(\d+)\s*[Bb]agn[oi]', text)
    return found.group(1) if found else None
def parse_location(text: str) -> Tuple[Optional[str], Optional[str]]:
    """Split a location string like 'Cuneo (CN)' into (city, province).

    When no two-letter province code is present, returns the whole
    stripped text as the city and None for the province.
    """
    if not text:
        return None, None
    stripped = text.strip()
    found = re.match(r'(.+?)\s*\(([A-Z]{2})\)', stripped)
    if found is None:
        return stripped, None
    return found.group(1).strip(), found.group(2).strip()
def parse_listing_urls(html: str) -> List[str]:
    """Extract unique property detail-page URLs from a listing page.

    Detail URLs look like /category/slug-city-id.htm where the id is a
    6-10 digit number; only property categories are kept.
    """
    pattern = r'(?:https?://www\.subito\.it)?/([a-z-]+)/([a-z0-9-]+-\d{6,10}\.htm)'
    collected = set()
    for category, path in re.findall(pattern, html):
        # Skip search/navigation pages: keep only known property categories.
        if category == 'immobili' or category in PROPERTY_TYPE_MAP:
            collected.add(f"{BASE_URL}/{category}/{path}")
    return list(collected)
def parse_total_pages(html: str, default_max: int = 10) -> int:
    """Estimate how many listing pages to iterate over.

    Subito.it paginates via JavaScript without traditional href links, so
    the page count is estimated from the advertised result total (patterns
    like "12.345 annunci" or "1.234 risultati") assuming roughly 25
    listings per page, capped at default_max. Falls back to default_max
    when no count can be read.
    """
    count_match = re.search(
        r'([\d.]+)\s*(?:annunci|risultati|immobili)', html, re.IGNORECASE
    )
    if count_match is None:
        return default_max
    try:
        total = int(count_match.group(1).replace('.', ''))
    except ValueError:
        # Matched text was not a usable number (e.g. lone dots).
        return default_max
    return min(total // 25 + 1, default_max)
def parse_property_details(html: str, url: str) -> Property:
    """Build a Property from a detail page's HTML.

    Most fields are extracted with regexes over the flattened page text;
    the title, description and seller name come from the HTML structure.
    """
    soup = BeautifulSoup(html, 'lxml')
    prop = Property(url=url)

    # Fields derived from the URL itself.
    prop.listing_id = extract_listing_id(url)
    prop.property_type = extract_property_type_from_url(url)
    prop.contract_type = extract_contract_type_from_url(url)

    # Title: the page's h1 heading.
    h1 = soup.find('h1')
    if h1:
        prop.title = h1.get_text(strip=True)

    # Flatten the whole page to text for pattern-based extraction.
    page_text = soup.get_text(' ', strip=True)

    # Main price, optionally a monthly rent ("... € / mese").
    price_found = re.search(r'(\d{1,3}(?:[.,]\d{3})*)\s*€(?:\s*/\s*mese)?', page_text)
    if price_found:
        prop.price = price_found.group(0).strip()

    # Location: "City (XX)" where XX is the province code.
    for loc_pattern in (r'([A-Za-zÀ-ÿ\s\'-]+)\s*\(([A-Z]{2})\)',):
        loc_found = re.search(loc_pattern, page_text)
        if loc_found:
            candidate_city = loc_found.group(1).strip()
            # Guard against accidentally capturing the title as the city.
            if len(candidate_city) < 50 and candidate_city != prop.title:
                prop.city = candidate_city
                prop.province = loc_found.group(2).strip()
                break

    # Numeric attributes pulled straight from the flattened text.
    area_found = re.search(r'(\d+)\s*mq', page_text)
    if area_found:
        prop.living_area = area_found.group(1)

    rooms_found = re.search(r'(\d+)\s*[Ll]ocal[ei]', page_text)
    if rooms_found:
        prop.rooms = rooms_found.group(1)

    floor_found = re.search(r'(\d+)[°º]?\s*[Pp]iano', page_text)
    if floor_found:
        prop.floor = floor_found.group(1)
    elif 'Piano terra' in page_text or 'piano terra' in page_text:
        prop.floor = '0'

    bath_found = re.search(r'(\d+)\s*[Bb]agn[oi]', page_text)
    if bath_found:
        prop.bathrooms = bath_found.group(1)

    # Condition (Stato): try the labeled form first, then bare keywords.
    condition_patterns = (
        r'Stato\s*[:\-]?\s*(Nuovo|Nuova costruzione|Ristrutturato|Buono|Da ristrutturare|Ottimo)',
        r'(Nuova costruzione|Ristrutturato|Da ristrutturare)',
    )
    for cond_pattern in condition_patterns:
        cond_found = re.search(cond_pattern, page_text, re.IGNORECASE)
        if cond_found:
            prop.condition = cond_found.group(1).strip()
            break

    # Energy class, e.g. "Classe energetica: A4" or "G".
    energy_found = re.search(
        r'Classe energetica\s*[:\-]?\s*([A-G][1-4]?[\+]*)', page_text, re.IGNORECASE
    )
    if energy_found:
        prop.energy_class = energy_found.group(1).strip().upper()

    # Heating type.
    heat_found = re.search(
        r'Riscaldamento\s*[:\-]?\s*(Autonomo|Centralizzato|Condominiale)',
        page_text,
        re.IGNORECASE,
    )
    if heat_found:
        prop.heating = heat_found.group(1).strip()

    # Parking: free text, bounded to avoid swallowing the page.
    park_found = re.search(r'Parcheggio\s*[:\-]?\s*([^,\n]+)', page_text)
    if park_found:
        parking_text = park_found.group(1).strip()
        if len(parking_text) < 100:
            prop.parking = parking_text

    # Boolean features: flag 'Si' when the keyword appears anywhere.
    feature_flags = (
        ('elevator', r'\bAscensore\b'),
        ('balcony', r'\bBalcone\b'),
        ('terrace', r'\bTerrazzo\b|\bTerrazza\b'),
        ('garden', r'\bGiardino\b'),
        ('air_conditioning', r'\bAria condizionata\b|\bClimatizzatore\b'),
        ('furnished', r'\bArredato\b'),
        ('available_immediately', r'\bDisponibilità immediata\b|\bDisponibile subito\b'),
    )
    for attr_name, feature_pattern in feature_flags:
        if re.search(feature_pattern, page_text, re.IGNORECASE):
            setattr(prop, attr_name, 'Si')

    # Description: the element following a "Descrizione" heading.
    desc_elem = None
    for section_heading in soup.find_all(['h6', 'h2', 'h3']):
        if 'Descrizione' in section_heading.get_text():
            desc_elem = section_heading.find_next_sibling()
            break
    if desc_elem:
        description = desc_elem.get_text(strip=True)
        if description:
            prop.description = description[:2000]  # cap stored length

    # Seller name: a short h6 inside a user ("utente") container that is
    # not one of the known section headings.
    section_words = ('caratteristiche', 'descrizione', 'dati', 'finanziamento')
    for candidate in soup.find_all('h6'):
        candidate_text = candidate.get_text(strip=True)
        if not candidate_text or len(candidate_text) >= 50:
            continue
        if any(word in candidate_text.lower() for word in section_words):
            continue
        container = candidate.find_parent()
        if container and 'utente' in str(container):
            prop.seller_name = candidate_text
            break

    # Posting date, e.g. "Oggi alle 14:30" or "5 marzo alle 10:15".
    date_found = re.search(
        r'(Oggi alle \d{1,2}:\d{2}|\d{1,2}\s+\w+\s+alle\s+\d{1,2}:\d{2})', page_text
    )
    if date_found:
        prop.date_posted = date_found.group(1).strip()

    # Favorites counter, e.g. "12 persone hanno ...".
    fav_found = re.search(r'(\d+)\s*person[ae]?\s*hanno', page_text)
    if fav_found:
        prop.favorites_count = fav_found.group(1)

    return prop
def parse_listing_item(item_html: str, url: str) -> Property:
    """Build a Property with basic info from a single listing-page item."""
    soup = BeautifulSoup(item_html, 'lxml')
    prop = Property(url=url)

    # URL-derived fields.
    prop.listing_id = extract_listing_id(url)
    prop.property_type = extract_property_type_from_url(url)
    prop.contract_type = extract_contract_type_from_url(url)

    # Title: listing cards use h3 (fall back to h2).
    title_node = soup.find('h3') or soup.find('h2')
    if title_node:
        prop.title = title_node.get_text(strip=True)

    # Flatten the card to text for regex extraction.
    card_text = soup.get_text()

    # Price, keeping the currency symbol.
    found = re.search(r'([\d.,]+)\s*€', card_text)
    if found:
        prop.price = found.group(0).strip()

    # Location: "City (XX)".
    found = re.search(r'([A-Za-z\s\'-]+)\s*\(([A-Z]{2})\)', card_text)
    if found:
        prop.city = found.group(1).strip()
        prop.province = found.group(2).strip()

    # Single-group numeric attributes share one extraction loop.
    numeric_fields = (
        ('living_area', r'(\d+)\s*mq'),
        ('rooms', r'(\d+)\s*[Ll]ocal[ei]'),
        ('floor', r'(\d+)[°º]?\s*[Pp]iano'),
        ('bathrooms', r'(\d+)\s*[Bb]agn[oi]'),
    )
    for attr_name, field_pattern in numeric_fields:
        found = re.search(field_pattern, card_text)
        if found:
            setattr(prop, attr_name, found.group(1))

    return prop