diff --git a/uk_bin_collection/uk_bin_collection/councils/NewhamCouncil.py b/uk_bin_collection/uk_bin_collection/councils/NewhamCouncil.py index 937f32af02..68ea843ef3 100644 --- a/uk_bin_collection/uk_bin_collection/councils/NewhamCouncil.py +++ b/uk_bin_collection/uk_bin_collection/councils/NewhamCouncil.py @@ -19,7 +19,7 @@ def parse_data(self, page: str, **kwargs) -> dict: raise ValueError(f"Error getting identifier: {str(e)}") # Make a BS4 object - page = requests.get(url) + page = requests.get(url, verify=False) soup = BeautifulSoup(page.text, "html.parser") soup.prettify @@ -36,20 +36,27 @@ def parse_data(self, page: str, **kwargs) -> dict: if len(sections_recycling) > 0: sections.append(sections_recycling[0]) + # as well as one for food waste + sections_food_waste = soup.find_all( + "div", {"class": "card h-100 card-food"} + ) + if len(sections_food_waste) > 0: + sections.append(sections_food_waste[0]) + # For each bin section, get the text and the list elements for item in sections: header = item.find("div", {"class": "card-header"}) bin_type_element = header.find_next("b") if bin_type_element is not None: bin_type = bin_type_element.text - array_expected_types = ["Domestic", "Recycling"] + array_expected_types = ["Domestic", "Recycling", "Food Waste"] if bin_type in array_expected_types: date = ( item.find_next("p", {"class": "card-text"}) .find_next("mark") .next_sibling.strip() ) next_collection = datetime.strptime(date, "%d/%m/%Y") dict_data = { "type": bin_type,