mirror of
https://github.com/sascha-hemi/hacs_waste_collection_schedule.git
synced 2026-03-21 04:06:03 +01:00
Merge branch 'master' into awb_oldenburg_de-2023
This commit is contained in:
@@ -124,6 +124,12 @@ async def async_setup(hass: HomeAssistant, config: dict):
|
||||
# initial fetch of all data
|
||||
hass.add_job(api._fetch)
|
||||
|
||||
def fetch_data():
|
||||
hass.add_job(api._fetch)
|
||||
|
||||
# Register new Service fetch_data
|
||||
hass.services.async_register(DOMAIN, 'fetch_data', fetch_data)
|
||||
|
||||
return True
|
||||
|
||||
|
||||
|
||||
@@ -6,5 +6,5 @@
|
||||
"dependencies": [],
|
||||
"codeowners": ["@mampfes"],
|
||||
"iot_class": "cloud_polling",
|
||||
"version": "1.31.0"
|
||||
"version": "1.32.0"
|
||||
}
|
||||
|
||||
@@ -0,0 +1,3 @@
|
||||
fetch_data:
|
||||
name: fetch_data
|
||||
description: fetch current source data
|
||||
@@ -1,8 +1,14 @@
|
||||
import datetime
|
||||
from typing import Optional
|
||||
|
||||
|
||||
class CollectionBase(dict): # inherit from dict to enable JSON serialization
|
||||
def __init__(self, date: datetime.date, icon: str = None, picture: str = None):
|
||||
def __init__(
|
||||
self,
|
||||
date: datetime.date,
|
||||
icon: Optional[str] = None,
|
||||
picture: Optional[str] = None,
|
||||
):
|
||||
dict.__init__(self, date=date.isoformat(), icon=icon, picture=picture)
|
||||
self._date = date # store date also as python date object
|
||||
|
||||
@@ -31,7 +37,11 @@ class CollectionBase(dict): # inherit from dict to enable JSON serialization
|
||||
|
||||
class Collection(CollectionBase):
|
||||
def __init__(
|
||||
self, date: datetime.date, t: str, icon: str = None, picture: str = None
|
||||
self,
|
||||
date: datetime.date,
|
||||
t: str,
|
||||
icon: Optional[str] = None,
|
||||
picture: Optional[str] = None,
|
||||
):
|
||||
CollectionBase.__init__(self, date=date, icon=icon, picture=picture)
|
||||
self["type"] = t
|
||||
|
||||
@@ -18,12 +18,7 @@ class ICS_v1:
|
||||
|
||||
def convert(self, ics_data):
|
||||
# parse ics file
|
||||
try:
|
||||
calendar = icalendar.Calendar.from_ical(ics_data)
|
||||
except Exception as err:
|
||||
_LOGGER.error(f"Parsing ics data failed:{str(err)}")
|
||||
_LOGGER.debug(ics_data)
|
||||
return []
|
||||
|
||||
# calculate start- and end-date for recurring events
|
||||
start_date = datetime.datetime.now().replace(
|
||||
|
||||
@@ -0,0 +1,27 @@
|
||||
#Work around SSL UNSAFE_LEGACY_RENEGOTIATION_DISABLED errors using method discussed in
|
||||
# https://stackoverflow.com/questions/71603314/ssl-error-unsafe-legacy-renegotiation-disabled
|
||||
|
||||
import requests
|
||||
import ssl
|
||||
import urllib3
|
||||
|
||||
class CustomHttpAdapter (requests.adapters.HTTPAdapter):
|
||||
# "Transport adapter" that allows us to use custom ssl_context.
|
||||
|
||||
def __init__(self, ssl_context=None, **kwargs):
|
||||
self.ssl_context = ssl_context
|
||||
super().__init__(**kwargs)
|
||||
|
||||
def init_poolmanager(self, connections, maxsize, block=False):
|
||||
self.poolmanager = urllib3.poolmanager.PoolManager(
|
||||
num_pools=connections, maxsize=maxsize,
|
||||
block=block, ssl_context=self.ssl_context)
|
||||
|
||||
|
||||
def get_legacy_session():
|
||||
ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
|
||||
ctx.options |= 0x4 # OP_LEGACY_SERVER_CONNECT
|
||||
session = requests.session()
|
||||
session.mount('https://', CustomHttpAdapter(ctx))
|
||||
return session
|
||||
|
||||
@@ -8,6 +8,12 @@ from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
TITLE = "A-Region"
|
||||
DESCRIPTION = "Source for A-Region, Switzerland waste collection."
|
||||
URL = "https://www.a-region.ch"
|
||||
|
||||
|
||||
def EXTRA_INFO():
|
||||
return [{"title": m} for m in MUNICIPALITIES]
|
||||
|
||||
|
||||
TEST_CASES = {
|
||||
"Andwil": {"municipality": "Andwil"},
|
||||
"Rorschach": {"municipality": "Rorschach", "district": "Unteres Stadtgebiet"},
|
||||
|
||||
@@ -7,11 +7,12 @@ import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "AbfallPlus"
|
||||
TITLE = "Abfall.IO / AbfallPlus"
|
||||
DESCRIPTION = (
|
||||
"Source for AbfallPlus.de waste collection. Service is hosted on abfall.io."
|
||||
)
|
||||
URL = "https://www.abfallplus.de"
|
||||
COUNTRY = "de"
|
||||
TEST_CASES = {
|
||||
"Waldenbuch": {
|
||||
"key": "8215c62763967916979e0e8566b6172e",
|
||||
@@ -56,7 +57,13 @@ TEST_CASES = {
|
||||
"f_id_strasse": 621,
|
||||
"f_id_strasse_hnr": 872,
|
||||
"f_abfallarten": [27, 28, 17, 67],
|
||||
}
|
||||
},
|
||||
"ALBA Berlin": {
|
||||
"key": "9583a2fa1df97ed95363382c73b41b1b",
|
||||
"f_id_kommune": 3227,
|
||||
"f_id_strasse": 3475,
|
||||
"f_id_strasse_hnr": 185575,
|
||||
},
|
||||
}
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
@@ -148,10 +155,10 @@ class Source:
|
||||
# - AWB Limburg-Weilheim uses this list to select a "Sonderabfall <city>"
|
||||
# waste type. The warning could be removed by adding the extra config
|
||||
# option "f_abfallarten" with the following values [27, 28, 17, 67]
|
||||
html_warnings = re.findall("\<b.*",ics_file)
|
||||
html_warnings = re.findall(r"\<b.*", ics_file)
|
||||
if html_warnings:
|
||||
ics_file = re.sub("\<br.*|\<b.*", "\\r", ics_file)
|
||||
#_LOGGER.warning("Html tags removed from ics file: " + ', '.join(html_warnings))
|
||||
ics_file = re.sub(r"\<br.*|\<b.*", "\\r", ics_file)
|
||||
# _LOGGER.warning("Html tags removed from ics file: " + ', '.join(html_warnings))
|
||||
|
||||
dates = self._ics.convert(ics_file)
|
||||
|
||||
|
||||
@@ -1,17 +1,17 @@
|
||||
import logging
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "Abfall Neunkirchen Siegerland"
|
||||
TITLE = "Neunkirchen Siegerland"
|
||||
DESCRIPTION = " Source for 'Abfallkalender Neunkirchen Siegerland'."
|
||||
URL = "https://www.neunkirchen-siegerland.de"
|
||||
TEST_CASES = {
|
||||
"Waldstraße":{ "street":"Waldstr"}
|
||||
}
|
||||
TEST_CASES = {"Waldstraße": {"strasse": "Waldstr"}}
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, strasse):
|
||||
self._strasse = strasse
|
||||
@@ -19,13 +19,20 @@ class Source:
|
||||
|
||||
def fetch(self):
|
||||
|
||||
args = {"out":"json", "type": "abto", "select":"2", "refid": "3362.1", "term": self._strasse }
|
||||
args = {
|
||||
"out": "json",
|
||||
"type": "abto",
|
||||
"select": "2",
|
||||
"refid": "3362.1",
|
||||
"term": self._strasse,
|
||||
}
|
||||
header = {"referer": "https://www.neunkirchen-siegerland.de"}
|
||||
r = requests.get("https://www.neunkirchen-siegerland.de/output/autocomplete.php", params=args,headers=header)
|
||||
|
||||
if r.status_code != 200:
|
||||
_LOGGER.error("Error querying calender data")
|
||||
return []
|
||||
r = requests.get(
|
||||
"https://www.neunkirchen-siegerland.de/output/autocomplete.php",
|
||||
params=args,
|
||||
headers=header,
|
||||
)
|
||||
r.raise_for_status()
|
||||
|
||||
ids = r.json()
|
||||
|
||||
@@ -33,19 +40,22 @@ class Source:
|
||||
raise Exception("no address found")
|
||||
|
||||
if len(ids) > 1:
|
||||
raise Exception (" to many addresses found, specify more detailed street name")
|
||||
raise Exception(
|
||||
" to many addresses found, specify more detailed street name"
|
||||
)
|
||||
|
||||
args = {"ModID":48, "call": "ical", "pois": ids[0][0], "kat": 1, "alarm":0}
|
||||
r = requests.get("https://www.neunkirchen-siegerland.de/output/options.php", params=args,headers=header)
|
||||
|
||||
if r.status_code != 200:
|
||||
_LOGGER.error("Error querying calender data")
|
||||
return []
|
||||
args = {"ModID": 48, "call": "ical", "pois": ids[0][0], "kat": 1, "alarm": 0}
|
||||
r = requests.get(
|
||||
"https://www.neunkirchen-siegerland.de/output/options.php",
|
||||
params=args,
|
||||
headers=header,
|
||||
)
|
||||
r.raise_for_status()
|
||||
|
||||
dates = self._ics.convert(r.text)
|
||||
|
||||
entries = []
|
||||
for d in dates:
|
||||
entries.append(Collection(d[0],d[1]))
|
||||
entries.append(Collection(d[0], d[1]))
|
||||
|
||||
return entries
|
||||
|
||||
@@ -4,7 +4,7 @@ import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "Abfall Zollernalbkreis"
|
||||
TITLE = "Abfallwirtschaft Zollernalbkreis"
|
||||
DESCRIPTION = "Source for Abfallwirtschaft Zollernalbkreis waste collection."
|
||||
URL = "https://www.abfallkalender-zak.de"
|
||||
TEST_CASES = {
|
||||
@@ -42,14 +42,7 @@ TEST_CASES = {
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, city, types, street=None):
|
||||
self._city = city
|
||||
self._street = street
|
||||
self._types = types
|
||||
self._ics = ICS()
|
||||
self._iconMap = {
|
||||
ICON_MAP = {
|
||||
"Restmüll": "mdi:trash-can",
|
||||
"Grünabfall" : "mdi:leaf",
|
||||
"Gelber Sack" : "mdi:sack",
|
||||
@@ -57,7 +50,15 @@ class Source:
|
||||
"Bildschirm-/Kühlgeräte" : "mdi:television-classic",
|
||||
"Schadstoffsammlung" : "mdi:biohazard",
|
||||
"altmetalle" : "mdi:nail",
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, city, types, street=None):
|
||||
self._city = city
|
||||
self._street = street
|
||||
self._types = types
|
||||
self._ics = ICS()
|
||||
|
||||
def fetch(self):
|
||||
now = datetime.now()
|
||||
@@ -95,6 +96,6 @@ class Source:
|
||||
waste_type = d[1]
|
||||
next_pickup_date = d[0]
|
||||
|
||||
entries.append(Collection(date=next_pickup_date, t=waste_type, icon=self._iconMap.get(waste_type,"mdi:trash-can")))
|
||||
entries.append(Collection(date=next_pickup_date, t=waste_type, icon=ICON_MAP.get(waste_type,"mdi:trash-can")))
|
||||
|
||||
return entries
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.AbfallnaviDe import AbfallnaviDe
|
||||
|
||||
TITLE = "AbfallNavi"
|
||||
TITLE = "AbfallNavi (RegioIT.de)"
|
||||
DESCRIPTION = (
|
||||
"Source for AbfallNavi waste collection. AbfallNavi is a brand name of regioit.de."
|
||||
)
|
||||
|
||||
@@ -1,16 +1,19 @@
|
||||
import urllib
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
import urllib
|
||||
|
||||
TITLE = "Landkreis Forchheim"
|
||||
TITLE = "Abfalltermine Forchheim"
|
||||
DESCRIPTION = "Source for Landkreis Forchheim"
|
||||
URL = "https://www.abfalltermine-forchheim.de/"
|
||||
TEST_CASES = {
|
||||
"Dormitz": {"city": "Dormitz", "area": "Dormitz"},
|
||||
"Rüsselbach": {"city": "Igensdorf", "area": "Oberrüsselbach"},
|
||||
"Kellerstraße": {"city": "Forchheim", "area": "Untere Kellerstraße (ab Adenauerallee bis Piastenbrücke)"}
|
||||
"Kellerstraße": {
|
||||
"city": "Forchheim",
|
||||
"area": "Untere Kellerstraße (ab Adenauerallee bis Piastenbrücke)",
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -25,7 +28,7 @@ class Source:
|
||||
r = requests.get(
|
||||
f"http://www.abfalltermine-forchheim.de/Forchheim/Landkreis/{place}/ics?RESTMUELL=true&RESTMUELL_SINGLE=true&BIO=true&YELLOW_SACK=true&PAPER=true"
|
||||
)
|
||||
r.encoding = r.apparent_encoding
|
||||
r.encoding = "utf-8"
|
||||
dates = self._ics.convert(r.text)
|
||||
|
||||
entries = []
|
||||
|
||||
@@ -5,9 +5,17 @@ import pytz
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "ALW Wolfenbüttel"
|
||||
# With verify=True the POST fails due to a SSLCertVerificationError.
|
||||
# Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:
|
||||
# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings
|
||||
# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl
|
||||
# These two lines areused to suppress the InsecureRequestWarning when using verify=False
|
||||
import urllib3
|
||||
urllib3.disable_warnings()
|
||||
|
||||
TITLE = "Abfallwirtschaft Landkreis Wolfenbüttel"
|
||||
DESCRIPTION = "Source for ALW Wolfenbüttel."
|
||||
URL = "https://abfallapp.alw-wf.de"
|
||||
URL = "https://alw-wf.de"
|
||||
TEST_CASES = {
|
||||
"Linden alte Straße": {"ort": "Linden mit Okertalsiedlung", "strasse": "Siedlung"},
|
||||
"Linden neuere Straße": {
|
||||
@@ -17,6 +25,7 @@ TEST_CASES = {
|
||||
"Dettum": {"ort": "Dettum", "strasse": "Egal!"},
|
||||
}
|
||||
|
||||
API_URL = "https://abfallapp.alw-wf.de"
|
||||
AUTH_DATA = {
|
||||
"auth": {
|
||||
"Name": "ALW",
|
||||
@@ -41,7 +50,7 @@ class Source:
|
||||
auth_params = json.dumps(AUTH_DATA)
|
||||
|
||||
# ALW WF uses a self-signed certificate so we need to disable certificate verification
|
||||
r = requests.post(f"{URL}/GetOrte.php", data=auth_params, verify=False)
|
||||
r = requests.post(f"{API_URL}/GetOrte.php", data=auth_params, verify=False)
|
||||
orte = r.json()
|
||||
if orte["result"][0]["StatusCode"] != 200:
|
||||
raise Exception(f"Error getting Orte: {orte['result'][0]['StatusMsg']}")
|
||||
@@ -53,7 +62,7 @@ class Source:
|
||||
if ort_id is None:
|
||||
raise Exception(f"Error finding Ort {self._ort}")
|
||||
|
||||
r = requests.post(f"{URL}/GetStrassen.php", data=auth_params, verify=False)
|
||||
r = requests.post(f"{API_URL}/GetStrassen.php", data=auth_params, verify=False)
|
||||
strassen = r.json()
|
||||
if strassen["result"][0]["StatusCode"] != 200:
|
||||
raise Exception(
|
||||
@@ -73,7 +82,7 @@ class Source:
|
||||
if strasse_id is None:
|
||||
raise Exception(f"Error finding Straße {self._strasse}")
|
||||
|
||||
r = requests.post(f"{URL}/GetArten.php", data=auth_params, verify=False)
|
||||
r = requests.post(f"{API_URL}/GetArten.php", data=auth_params, verify=False)
|
||||
arten = r.json()
|
||||
if arten["result"][0]["StatusCode"] != 200:
|
||||
raise Exception(f"Error getting Arten: {arten['result'][0]['StatusMsg']}")
|
||||
@@ -84,7 +93,7 @@ class Source:
|
||||
|
||||
entries = []
|
||||
r = requests.post(
|
||||
f"{URL}/GetTermine.php/{strasse_id}", data=auth_params, verify=False
|
||||
f"{API_URL}/GetTermine.php/{strasse_id}", data=auth_params, verify=False
|
||||
)
|
||||
termine = r.json()
|
||||
if termine["result"][0]["StatusCode"] != 200:
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
import contextlib
|
||||
from datetime import datetime
|
||||
from urllib.parse import quote
|
||||
from typing import Optional
|
||||
from urllib.parse import quote
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "Abfall ART Trier"
|
||||
TITLE = "ART Trier"
|
||||
DESCRIPTION = "Source for waste collection of ART Trier."
|
||||
URL = "https://www.art-trier.de"
|
||||
TEST_CASES = {
|
||||
@@ -43,26 +43,25 @@ ICON_MAP = {
|
||||
"Restmüll": "mdi:trash-can",
|
||||
"Gelber Sack": "mdi:recycle",
|
||||
}
|
||||
SPECIAL_CHARS = {
|
||||
ord("ä"): "ae",
|
||||
ord("ü"): "ue",
|
||||
ord("ö"): "oe",
|
||||
ord("ß"): "ss",
|
||||
ord("("): "",
|
||||
ord(")"): "",
|
||||
ord("."): "",
|
||||
}
|
||||
SPECIAL_CHARS = str.maketrans(
|
||||
{
|
||||
" ": "_",
|
||||
"ä": "ae",
|
||||
"ü": "ue",
|
||||
"ö": "oe",
|
||||
"ß": "ss",
|
||||
"(": None,
|
||||
")": None,
|
||||
",": None,
|
||||
".": None,
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, district: str, zip_code: str):
|
||||
self._district = quote(
|
||||
district.lower()
|
||||
.removeprefix("stadt ")
|
||||
.replace(" ", "_")
|
||||
.replace(",", "")
|
||||
.translate(SPECIAL_CHARS)
|
||||
.strip()
|
||||
district.lower().removeprefix("stadt ").translate(SPECIAL_CHARS).strip()
|
||||
)
|
||||
self._zip_code = zip_code
|
||||
self._ics = ICS(regex=r"^A.R.T. Abfuhrtermin: (.*)", split_at=r" & ")
|
||||
@@ -70,12 +69,12 @@ class Source:
|
||||
def fetch(self):
|
||||
url = f"{API_URL}/{self._zip_code}_{self._district}_{REMINDER_DAY}-{REMINDER_TIME}.ics"
|
||||
|
||||
r = requests.get(url)
|
||||
schedule = self._ics.convert(r.text)
|
||||
res = requests.get(url)
|
||||
res.raise_for_status()
|
||||
|
||||
schedule = self._ics.convert(res.text)
|
||||
|
||||
return [
|
||||
Collection(
|
||||
date=entry[0], t=entry[1], icon=ICON_MAP.get(entry[1], "mdi:trash-can")
|
||||
)
|
||||
Collection(date=entry[0], t=entry[1], icon=ICON_MAP.get(entry[1]))
|
||||
for entry in schedule
|
||||
]
|
||||
|
||||
@@ -0,0 +1,91 @@
|
||||
import datetime
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection
|
||||
|
||||
TITLE = "Ashfield District Council"
|
||||
DESCRIPTION = "Source for ashfield.gov.uk, Ashfield District Council, UK"
|
||||
URL = "https://www.ashfield.gov.uk"
|
||||
TEST_CASES = {
|
||||
"11 Maun View Gardens, Sutton-in-Ashfield": {"uprn": 10001336299},
|
||||
"4A Station Street, Kirkby-in-Ashfield": {"post_code": "NG177AR", "number": "4A"},
|
||||
"Ashfield District Council": {
|
||||
"post_code": "NG17 8DA",
|
||||
"name": "Ashfield District Council",
|
||||
},
|
||||
}
|
||||
|
||||
API_URLS = {
|
||||
"address_search": "https://www.ashfield.gov.uk/api/powersuite/getaddresses/{postcode}",
|
||||
"collection": "https://www.ashfield.gov.uk/api/powersuite/GetCollectionByUprnAndDate/{uprn}",
|
||||
}
|
||||
|
||||
ICON_MAP = {
|
||||
"Residual Waste Collection Service": "mdi:trash-can",
|
||||
"Domestic Recycling Collection Service": "mdi:recycle",
|
||||
"Domestic Glass Collection Service": "mdi:glass-fragile",
|
||||
"Garden Waste Collection Service": "mdi:leaf",
|
||||
}
|
||||
|
||||
NAMES = {
|
||||
"Residual Waste Collection Service": "Red (rubbish)",
|
||||
"Domestic Recycling Collection Service": "Green (recycling)",
|
||||
"Domestic Glass Collection Service": "Blue (glass)",
|
||||
"Garden Waste Collection Service": "Brown (garden)",
|
||||
}
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, post_code=None, number=None, name=None, uprn=None):
|
||||
self._post_code = post_code
|
||||
self._number = number
|
||||
self._name = name
|
||||
self._uprn = uprn
|
||||
|
||||
def fetch(self):
|
||||
if not self._uprn:
|
||||
# look up the UPRN for the address
|
||||
q = str(API_URLS["address_search"]).format(postcode=self._post_code)
|
||||
r = requests.get(q)
|
||||
r.raise_for_status()
|
||||
addresses = r.json()["data"]
|
||||
|
||||
if self._name:
|
||||
self._uprn = [
|
||||
int(x["AccountSiteUprn"])
|
||||
for x in addresses
|
||||
if x["SiteAddressName"].capitalize() == self._name.capitalize()
|
||||
][0]
|
||||
elif self._number:
|
||||
self._uprn = [
|
||||
int(x["AccountSiteUprn"])
|
||||
for x in addresses
|
||||
if x["SiteAddressNumber"] == self._number
|
||||
][0]
|
||||
|
||||
if not self._uprn:
|
||||
raise Exception(
|
||||
f"Could not find address {self._post_code} {self._number}{self._name}"
|
||||
)
|
||||
|
||||
q = str(API_URLS["collection"]).format(uprn=self._uprn)
|
||||
|
||||
r = requests.get(q)
|
||||
r.raise_for_status()
|
||||
|
||||
collections = r.json()["data"]
|
||||
entries = []
|
||||
|
||||
if collections:
|
||||
for collection in collections:
|
||||
entries.append(
|
||||
Collection(
|
||||
date=datetime.datetime.strptime(
|
||||
collection["Date"], "%d/%m/%Y %H:%M:%S"
|
||||
).date(),
|
||||
t=NAMES.get(collection["Service"]),
|
||||
icon=ICON_MAP.get(collection["Service"]),
|
||||
)
|
||||
)
|
||||
|
||||
return entries
|
||||
@@ -1,16 +1,13 @@
|
||||
import datetime
|
||||
from html.parser import HTMLParser
|
||||
|
||||
import requests
|
||||
# import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
# Updated to work around SSL UNSAFE_LEGACY_RENEGOTIATION_DISABLED error using method discussed in
|
||||
# https://stackoverflow.com/questions/71603314/ssl-error-unsafe-legacy-renegotiation-disabled
|
||||
import ssl
|
||||
import urllib3
|
||||
# Include work around for SSL UNSAFE_LEGACY_RENEGOTIATION_DISABLED error
|
||||
from waste_collection_schedule.service.SSLError import get_legacy_session
|
||||
|
||||
|
||||
TITLE = "Auckland council"
|
||||
TITLE = "Auckland Council"
|
||||
DESCRIPTION = "Source for Auckland council."
|
||||
URL = "https://aucklandcouncil.govt.nz"
|
||||
TEST_CASES = {
|
||||
@@ -34,30 +31,6 @@ MONTH = {
|
||||
}
|
||||
|
||||
|
||||
# Additional code snippet to work around SSL issue
|
||||
class CustomHttpAdapter (requests.adapters.HTTPAdapter):
|
||||
# "Transport adapter" that allows us to use custom ssl_context.
|
||||
|
||||
def __init__(self, ssl_context=None, **kwargs):
|
||||
self.ssl_context = ssl_context
|
||||
super().__init__(**kwargs)
|
||||
|
||||
def init_poolmanager(self, connections, maxsize, block=False):
|
||||
self.poolmanager = urllib3.poolmanager.PoolManager(
|
||||
num_pools=connections, maxsize=maxsize,
|
||||
block=block, ssl_context=self.ssl_context)
|
||||
|
||||
|
||||
def get_legacy_session():
|
||||
ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
|
||||
ctx.options |= 0x4 # OP_LEGACY_SERVER_CONNECT
|
||||
session = requests.session()
|
||||
session.mount('https://', CustomHttpAdapter(ctx))
|
||||
return session
|
||||
# End SSL issue code snippet
|
||||
|
||||
|
||||
|
||||
def toDate(formattedDate):
|
||||
items = formattedDate.split()
|
||||
return datetime.date(int(items[3]), MONTH[items[2]], int(items[1]))
|
||||
@@ -144,13 +117,6 @@ class Source:
|
||||
# verify=False,
|
||||
)
|
||||
|
||||
# Original request code
|
||||
# r = requests.get(
|
||||
# "https://www.aucklandcouncil.govt.nz/rubbish-recycling/rubbish-recycling-collections/Pages/collection-day-detail.aspx",
|
||||
# params=params,
|
||||
# verify=False,
|
||||
# )
|
||||
|
||||
p = WasteSearchResultsParser()
|
||||
p.feed(r.text)
|
||||
return p.entries
|
||||
|
||||
@@ -1,93 +0,0 @@
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "avl-ludwigsburg.de"
|
||||
DESCRIPTION = "Abfallverwertungsgesellschaft des Landkreises Ludwigsburg mbH"
|
||||
URL = "https://www.avl-ludwigsburg.de/privatkunden/termine/abfallkalender/suche/"
|
||||
|
||||
TEST_CASES = {
|
||||
"CityWithoutStreet": {"city": "Möglingen"},
|
||||
"CityWithStreet": {"city": "Ludwigsburg", "street": "Bahnhofstraße"},
|
||||
}
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, city, street=None):
|
||||
self._city = city
|
||||
self._street = street
|
||||
self._ics = ICS()
|
||||
|
||||
def fetch(self):
|
||||
# Get the hidden parameters by loading the page
|
||||
session = requests.Session()
|
||||
r = session.get(URL)
|
||||
r.raise_for_status()
|
||||
|
||||
soup = BeautifulSoup(r.text, features="html.parser")
|
||||
hidden_tags = soup.find_all("input", type="hidden")
|
||||
|
||||
# Prepare data for the real web request
|
||||
data = {}
|
||||
for tag in hidden_tags:
|
||||
data[tag.get("name")] = tag.get("value")
|
||||
|
||||
# Find the cities which do need a street name
|
||||
data_cities_with_streets = soup.find_all(
|
||||
"input", type="text", placeholder="Ort eingeben"
|
||||
)
|
||||
cities_with_streets = ""
|
||||
for tag in data_cities_with_streets:
|
||||
cities_with_streets += tag.get("data-cities-with-streets")
|
||||
cities_with_streets = cities_with_streets.split(",")
|
||||
|
||||
data["tx_avlcollections_pi5[wasteCalendarLocationItem]"] = self._city
|
||||
data["tx_avlcollections_pi5[wasteCalendarStreetItem]"] = self._street
|
||||
|
||||
# Remove some data which the webserver doesn't like
|
||||
data.pop("id", None)
|
||||
data.pop("tx_kesearch_pi1[page]", None)
|
||||
data.pop("tx_kesearch_pi1[resetFilters]", None)
|
||||
data.pop("tx_kesearch_pi1[sortByField]", None)
|
||||
data.pop("tx_kesearch_pi1[sortByDir]", None)
|
||||
|
||||
# Depending on the city remove the street from the data set
|
||||
if self._city.lower() not in cities_with_streets:
|
||||
data.pop("tx_avlcollections_pi5[wasteCalendarStreetItem]", None)
|
||||
|
||||
# Get the final data
|
||||
r = session.post(URL, data=data)
|
||||
r.raise_for_status()
|
||||
|
||||
if r.text.find("Ort konnte nicht gefunden werden.") != -1:
|
||||
raise Exception("Error: Ort konnte nicht gefunden werden.")
|
||||
|
||||
if r.text.find("Straße konnte nicht gefunden werden.") != -1:
|
||||
raise Exception("Error: Ort konnte nicht gefunden werden.")
|
||||
|
||||
if r.text.find(".ics") == -1:
|
||||
raise Exception("Error: No ics link found.")
|
||||
|
||||
soup = BeautifulSoup(r.text, features="html.parser")
|
||||
downloads = soup.find_all("a", href=True)
|
||||
ics_link = ""
|
||||
for download in downloads:
|
||||
link = download.get("href")
|
||||
if ".ics" in link:
|
||||
ics_link = link
|
||||
full_url = "https://www.avl-ludwigsburg.de" + ics_link
|
||||
return self.fetch_ics(full_url)
|
||||
|
||||
def fetch_ics(self, url):
|
||||
r = requests.get(url)
|
||||
r.raise_for_status()
|
||||
|
||||
# Parse ics file
|
||||
r.encoding = "utf-8"
|
||||
dates = self._ics.convert(r.text)
|
||||
|
||||
entries = []
|
||||
for d in dates:
|
||||
entries.append(Collection(d[0], d[1]))
|
||||
return entries
|
||||
@@ -3,9 +3,9 @@ from bs4 import BeautifulSoup
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "AW Harburg"
|
||||
TITLE = "Abfallwirtschaft Landkreis Harburg"
|
||||
DESCRIPTION = "Abfallwirtschaft Landkreis Harburg"
|
||||
URL = "https://www.landkreis-harburg.de/bauen-umwelt/abfallwirtschaft/abfallkalender/"
|
||||
URL = "https://www.landkreis-harburg.de"
|
||||
|
||||
TEST_CASES = {
|
||||
"CityWithTwoLevels": {"level_1": "Hanstedt", "level_2": "Evendorf"},
|
||||
@@ -16,6 +16,9 @@ TEST_CASES = {
|
||||
},
|
||||
}
|
||||
|
||||
API_URL = (
|
||||
"https://www.landkreis-harburg.de/bauen-umwelt/abfallwirtschaft/abfallkalender/"
|
||||
)
|
||||
HEADERS = {
|
||||
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64)",
|
||||
}
|
||||
@@ -33,11 +36,11 @@ class Source:
|
||||
# Get the IDs of the districts on the first level
|
||||
# Double loading is on purpose because sometimes the webpage has an overlay
|
||||
# which is gone on the second try in a session
|
||||
r = session.get(URL, headers=HEADERS)
|
||||
r = session.get(API_URL, headers=HEADERS)
|
||||
r.raise_for_status()
|
||||
if "Zur aufgerufenen Seite" in r.text:
|
||||
r = session.get(URL, headers=HEADERS)
|
||||
if r.status_code != 200:
|
||||
raise Exception(f"Error: failed to fetch first url: {URL}")
|
||||
r = session.get(API_URL, headers=HEADERS)
|
||||
r.raise_for_status()
|
||||
|
||||
# Get the IDs of the districts on the first level
|
||||
id = self.parse_level(r.text, 1)
|
||||
@@ -53,8 +56,7 @@ class Source:
|
||||
"selected_ebene": 0,
|
||||
}
|
||||
r = session.get(url, params=params, headers=HEADERS)
|
||||
if r.status_code != 200:
|
||||
raise Exception(f"Error: failed to fetch second url: {url}")
|
||||
r.raise_for_status()
|
||||
|
||||
# Get the IDs of the districts on the second level
|
||||
id = self.parse_level(r.text, 2)
|
||||
@@ -69,8 +71,7 @@ class Source:
|
||||
"selected_ebene": 0,
|
||||
}
|
||||
r = session.get(url, params=params, headers=HEADERS)
|
||||
if r.status_code != 200:
|
||||
raise Exception(f"Error: failed to fetch third url: {url}")
|
||||
r.raise_for_status()
|
||||
|
||||
# Get the IDs of the districts on the third level
|
||||
id = self.parse_level(r.text, 3)
|
||||
@@ -82,6 +83,7 @@ class Source:
|
||||
"owner": 20100,
|
||||
}
|
||||
r = session.get(url, params=params, headers=HEADERS)
|
||||
r.raise_for_status()
|
||||
|
||||
# Sometimes there is no garbage calendar available
|
||||
if "Es sind keine Abfuhrbezirke hinterlegt." in r.text:
|
||||
|
||||
@@ -37,16 +37,17 @@ class Source:
|
||||
|
||||
soup = BeautifulSoup(r.text, features="html.parser")
|
||||
downloads = soup.find_all("a", href=True)
|
||||
ics_url = None
|
||||
ics_urls = list()
|
||||
for download in downloads:
|
||||
href = download.get("href")
|
||||
if "t=ics" in href:
|
||||
ics_url = href
|
||||
break
|
||||
if "t=ics" in href and href not in ics_urls: #The website lists the same url multiple times, we only want it once
|
||||
ics_urls.append(href)
|
||||
|
||||
if ics_url is None:
|
||||
if not ics_urls:
|
||||
raise Exception(f"ics url not found")
|
||||
|
||||
entries = []
|
||||
for ics_url in ics_urls:
|
||||
# get ics file
|
||||
r = session.get(ics_url, headers=HEADERS)
|
||||
r.raise_for_status()
|
||||
@@ -54,7 +55,7 @@ class Source:
|
||||
# parse ics file
|
||||
dates = self._ics.convert(r.text)
|
||||
|
||||
entries = []
|
||||
for d in dates:
|
||||
entries.append(Collection(d[0], d[1]))
|
||||
|
||||
return entries
|
||||
|
||||
6
custom_components/waste_collection_schedule/waste_collection_schedule/source/awb_oldenburg_de.py
Executable file → Normal file
6
custom_components/waste_collection_schedule/waste_collection_schedule/source/awb_oldenburg_de.py
Executable file → Normal file
@@ -8,11 +8,13 @@ from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "AWB Oldenburg"
|
||||
DESCRIPTION = "Source for 'Abfallwirtschaftsbetrieb Stadt Oldenburg (Oldb)'."
|
||||
URL = "https://www.oldenburg.de/startseite/leben-umwelt/awb/abfall-a-z/abfuhrkalender.html"
|
||||
URL = "https://www.oldenburg.de"
|
||||
TEST_CASES = {
|
||||
"Polizeiinspektion Oldenburg": {"street": "Friedhofsweg", "house_number": 30}
|
||||
}
|
||||
|
||||
API_URL = "https://www.oldenburg.de/startseite/leben-umwelt/awb/abfall-a-z/abfuhrkalender.html"
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
class Source:
|
||||
@@ -51,7 +53,7 @@ class Source:
|
||||
args = urllib.parse.urlencode(args, quote_via=urllib.parse.quote)
|
||||
|
||||
# post request
|
||||
r = requests.get(URL, params=args)
|
||||
r = requests.get(API_URL, params=args)
|
||||
|
||||
dates = self._ics.convert(r.text)
|
||||
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
import datetime
|
||||
import json
|
||||
import logging
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "AWIDO"
|
||||
TITLE = "AWIDO Online"
|
||||
DESCRIPTION = "Source for AWIDO waste collection."
|
||||
URL = "https://www.awido-online.de/"
|
||||
TEST_CASES = {
|
||||
@@ -44,14 +43,14 @@ class Source:
|
||||
r = requests.get(
|
||||
f"https://awido.cubefour.de/WebServices/Awido.Service.svc/secure/getPlaces/client={self._customer}"
|
||||
)
|
||||
places = json.loads(r.text)
|
||||
r.raise_for_status()
|
||||
places = r.json()
|
||||
|
||||
# create city to key map from retrieved places
|
||||
city_to_oid = {place["value"].strip(): place["key"] for (place) in places}
|
||||
|
||||
if self._city not in city_to_oid:
|
||||
_LOGGER.error(f"city not found: {self._city}")
|
||||
return []
|
||||
raise Exception(f"city not found: {self._city}")
|
||||
|
||||
oid = city_to_oid[self._city]
|
||||
|
||||
@@ -62,7 +61,8 @@ class Source:
|
||||
f"https://awido.cubefour.de/WebServices/Awido.Service.svc/secure/getGroupedStreets/{oid}",
|
||||
params={"client": self._customer},
|
||||
)
|
||||
streets = json.loads(r.text)
|
||||
r.raise_for_status()
|
||||
streets = r.json()
|
||||
|
||||
# create street to key map from retrieved places
|
||||
street_to_oid = {
|
||||
@@ -78,7 +78,8 @@ class Source:
|
||||
f"https://awido.cubefour.de/WebServices/Awido.Service.svc/secure/getGroupedStreets/{oid}",
|
||||
params={"client": self._customer},
|
||||
)
|
||||
streets = json.loads(r.text)
|
||||
r.raise_for_status()
|
||||
streets = r.json()
|
||||
|
||||
# create street to key map from retrieved places
|
||||
street_to_oid = {
|
||||
@@ -86,8 +87,7 @@ class Source:
|
||||
}
|
||||
|
||||
if self._street not in street_to_oid:
|
||||
_LOGGER.error(f"street not found: {self._street}")
|
||||
return []
|
||||
raise Exception(f"street not found: {self._street}")
|
||||
|
||||
oid = street_to_oid[self._street]
|
||||
|
||||
@@ -96,7 +96,8 @@ class Source:
|
||||
f"https://awido.cubefour.de/WebServices/Awido.Service.svc/secure/getStreetAddons/{oid}",
|
||||
params={"client": self._customer},
|
||||
)
|
||||
hsnbrs = json.loads(r.text)
|
||||
r.raise_for_status()
|
||||
hsnbrs = r.json()
|
||||
|
||||
# create housenumber to key map from retrieved places
|
||||
hsnbr_to_oid = {
|
||||
@@ -104,8 +105,7 @@ class Source:
|
||||
}
|
||||
|
||||
if self._housenumber not in hsnbr_to_oid:
|
||||
_LOGGER.error(f"housenumber not found: {self._housenumber}")
|
||||
return []
|
||||
raise Exception(f"housenumber not found: {self._housenumber}")
|
||||
|
||||
oid = hsnbr_to_oid[self._housenumber]
|
||||
|
||||
@@ -114,7 +114,8 @@ class Source:
|
||||
f"https://awido.cubefour.de/WebServices/Awido.Service.svc/secure/getData/{oid}",
|
||||
params={"fractions": "", "client": self._customer},
|
||||
)
|
||||
cal_json = json.loads(r.text)
|
||||
r.raise_for_status()
|
||||
cal_json = r.json()
|
||||
|
||||
# map fraction code to fraction name
|
||||
fractions = {fract["snm"]: fract["nm"] for (fract) in cal_json["fracts"]}
|
||||
|
||||
@@ -5,7 +5,7 @@ import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "AWN"
|
||||
TITLE = "Abfallwirtschaft Neckar-Odenwald-Kreis"
|
||||
DESCRIPTION = "Source for AWN (Abfallwirtschaft Neckar-Odenwald-Kreis)."
|
||||
URL = "https://www.awn-online.de"
|
||||
TEST_CASES = {
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
import json
|
||||
import logging
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "AWR"
|
||||
TITLE = "Abfallwirtschaft Rendsburg"
|
||||
DESCRIPTION = "Source for Abfallwirtschaft Rendsburg"
|
||||
URL = "https://www.awr.de"
|
||||
TEST_CASES = {
|
||||
@@ -24,7 +23,8 @@ class Source:
|
||||
def fetch(self):
|
||||
# retrieve list of cities
|
||||
r = requests.get("https://www.awr.de/api_v2/collection_dates/1/orte")
|
||||
cities = json.loads(r.text)
|
||||
r.raise_for_status()
|
||||
cities = r.json()
|
||||
|
||||
# create city to id map from retrieved cities
|
||||
city_to_id = {
|
||||
@@ -32,8 +32,7 @@ class Source:
|
||||
}
|
||||
|
||||
if self._city not in city_to_id:
|
||||
_LOGGER.error(f"city not found: {self._city}")
|
||||
return []
|
||||
raise Exception(f"city not found: {self._city}")
|
||||
|
||||
cityId = city_to_id[self._city]
|
||||
|
||||
@@ -41,7 +40,8 @@ class Source:
|
||||
r = requests.get(
|
||||
f"https://www.awr.de/api_v2/collection_dates/1/ort/{cityId}/strassen"
|
||||
)
|
||||
streets = json.loads(r.text)
|
||||
r.raise_for_status()
|
||||
streets = r.json()
|
||||
|
||||
# create street to id map from retrieved cities
|
||||
street_to_id = {
|
||||
@@ -50,8 +50,7 @@ class Source:
|
||||
}
|
||||
|
||||
if self._street not in street_to_id:
|
||||
_LOGGER.error(f"street not found: {self._street}")
|
||||
return []
|
||||
raise Exception(f"street not found: {self._street}")
|
||||
|
||||
streetId = street_to_id[self._street]
|
||||
|
||||
@@ -59,7 +58,8 @@ class Source:
|
||||
r = requests.get(
|
||||
f"https://www.awr.de/api_v2/collection_dates/1/ort/{cityId}/abfallarten"
|
||||
)
|
||||
waste_types = json.loads(r.text)
|
||||
r.raise_for_status()
|
||||
waste_types = r.json()
|
||||
wt = "-".join([t["id"] for t in waste_types["abfallarten"]])
|
||||
|
||||
# get ics file
|
||||
@@ -73,4 +73,3 @@ class Source:
|
||||
for d in dates:
|
||||
entries.append(Collection(d[0], d[1]))
|
||||
return entries
|
||||
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
import json
|
||||
import logging
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "AWSH"
|
||||
TITLE = "Abfallwirtschaft Südholstein"
|
||||
DESCRIPTION = "Source for Abfallwirtschaft Südholstein"
|
||||
URL = "https://www.awsh.de"
|
||||
TEST_CASES = {
|
||||
@@ -24,7 +23,8 @@ class Source:
|
||||
def fetch(self):
|
||||
# retrieve list of cities
|
||||
r = requests.get("https://www.awsh.de/api_v2/collection_dates/1/orte")
|
||||
cities = json.loads(r.text)
|
||||
r.raise_for_status()
|
||||
cities = r.json()
|
||||
|
||||
# create city to id map from retrieved cities
|
||||
city_to_id = {
|
||||
@@ -32,8 +32,7 @@ class Source:
|
||||
}
|
||||
|
||||
if self._city not in city_to_id:
|
||||
_LOGGER.error(f"city not found: {self._city}")
|
||||
return []
|
||||
raise Exception(f"city not found: {self._city}")
|
||||
|
||||
cityId = city_to_id[self._city]
|
||||
|
||||
@@ -41,7 +40,8 @@ class Source:
|
||||
r = requests.get(
|
||||
f"https://www.awsh.de/api_v2/collection_dates/1/ort/{cityId}/strassen"
|
||||
)
|
||||
streets = json.loads(r.text)
|
||||
r.raise_for_status()
|
||||
streets = r.json()
|
||||
|
||||
# create street to id map from retrieved cities
|
||||
street_to_id = {
|
||||
@@ -50,8 +50,7 @@ class Source:
|
||||
}
|
||||
|
||||
if self._street not in street_to_id:
|
||||
_LOGGER.error(f"street not found: {self._street}")
|
||||
return []
|
||||
raise Exception(f"street not found: {self._street}")
|
||||
|
||||
streetId = street_to_id[self._street]
|
||||
|
||||
@@ -59,13 +58,15 @@ class Source:
|
||||
r = requests.get(
|
||||
f"https://www.awsh.de/api_v2/collection_dates/1/ort/{cityId}/abfallarten"
|
||||
)
|
||||
waste_types = json.loads(r.text)
|
||||
r.raise_for_status()
|
||||
waste_types = r.json()
|
||||
wt = "-".join([t["id"] for t in waste_types["abfallarten"]])
|
||||
|
||||
# get ics file
|
||||
r = requests.get(
|
||||
f"https://www.awsh.de/api_v2/collection_dates/1/ort/{cityId}/strasse/{streetId}/hausnummern/0/abfallarten/{wt}/kalender.ics"
|
||||
)
|
||||
r.raise_for_status()
|
||||
|
||||
dates = self._ics.convert(r.text)
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ from waste_collection_schedule import Collection
|
||||
|
||||
TITLE = 'Banyule City Council'
|
||||
DESCRIPTION = 'Source for Banyule City Council rubbish collection.'
|
||||
URL = 'https://www.banyule.vic.gov.au/binday'
|
||||
URL = 'https://www.banyule.vic.gov.au'
|
||||
TEST_CASES = {
|
||||
'Monday A': {'street_address': '6 Mandall Avenue, IVANHOE'},
|
||||
'Monday A Geolocation ID': {'geolocation_id': '4f7ebfca-1526-4363-8b87-df3103a10a87'},
|
||||
|
||||
@@ -5,7 +5,15 @@ from html.parser import HTMLParser
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "Berline Recycling"
|
||||
# With verify=True the POST fails due to a SSLCertVerificationError.
|
||||
# Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:
|
||||
# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings
|
||||
# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl
|
||||
# These two lines areused to suppress the InsecureRequestWarning when using verify=False
|
||||
import urllib3
|
||||
urllib3.disable_warnings()
|
||||
|
||||
TITLE = "Berlin Recycling"
|
||||
DESCRIPTION = "Source for Berlin Recycling waste collection."
|
||||
URL = "https://berlin-recycling.de"
|
||||
TEST_CASES = {
|
||||
|
||||
@@ -4,6 +4,7 @@ import requests
|
||||
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
from waste_collection_schedule.service.SSLError import get_legacy_session
|
||||
|
||||
TITLE = "Bielefeld"
|
||||
DESCRIPTION = "Source for Stadt Bielefeld."
|
||||
@@ -45,12 +46,17 @@ class Source:
|
||||
self._ics = ICS()
|
||||
|
||||
def fetch(self):
|
||||
session = requests.session()
|
||||
s = get_legacy_session()
|
||||
# session = requests.session()
|
||||
|
||||
r = session.get(
|
||||
r = s.get(
|
||||
SERVLET,
|
||||
params={"SubmitAction": "wasteDisposalServices", "InFrameMode": "TRUE"},
|
||||
)
|
||||
# r = session.get(
|
||||
# SERVLET,
|
||||
# params={"SubmitAction": "wasteDisposalServices", "InFrameMode": "TRUE"},
|
||||
# )
|
||||
r.raise_for_status()
|
||||
r.encoding = "utf-8"
|
||||
|
||||
@@ -68,17 +74,26 @@ class Source:
|
||||
args["ContainerGewaehlt_2"] = "on"
|
||||
args["ContainerGewaehlt_3"] = "on"
|
||||
args["ContainerGewaehlt_4"] = "on"
|
||||
r = session.post(
|
||||
|
||||
r = s.post(
|
||||
SERVLET,
|
||||
data=args,
|
||||
)
|
||||
# r = session.post(
|
||||
# SERVLET,
|
||||
# data=args,
|
||||
# )
|
||||
r.raise_for_status()
|
||||
|
||||
args["SubmitAction"] = "forward"
|
||||
r = session.post(
|
||||
r = s.post(
|
||||
SERVLET,
|
||||
data=args,
|
||||
)
|
||||
# r = session.post(
|
||||
# SERVLET,
|
||||
# data=args,
|
||||
# )
|
||||
r.raise_for_status()
|
||||
|
||||
reminder_day = "keine Erinnerung" # "keine Erinnerung", "am Vortag", "2 Tage vorher", "3 Tage vorher"
|
||||
@@ -88,10 +103,14 @@ class Source:
|
||||
args["SubmitAction"] = "filedownload_ICAL"
|
||||
args["ICalErinnerung"] = reminder_day
|
||||
args["ICalZeit"] = reminder_time
|
||||
r = session.post(
|
||||
r = s.post(
|
||||
SERVLET,
|
||||
data=args,
|
||||
)
|
||||
# r = session.post(
|
||||
# SERVLET,
|
||||
# data=args,
|
||||
# )
|
||||
r.raise_for_status()
|
||||
|
||||
dates = self._ics.convert(r.text)
|
||||
|
||||
@@ -5,7 +5,7 @@ import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "BMV.at"
|
||||
TITLE = "Burgenländischer Müllverband"
|
||||
DESCRIPTION = "Source for BMV, Austria"
|
||||
URL = "https://www.bmv.at"
|
||||
TEST_CASES = {
|
||||
|
||||
@@ -4,7 +4,7 @@ import requests
|
||||
from dateutil import parser
|
||||
from waste_collection_schedule import Collection
|
||||
|
||||
TITLE = "bracknell-forest.gov.uk"
|
||||
TITLE = "Bracknell Forest Council"
|
||||
DESCRIPTION = "Bracknell Forest Council, UK - Waste Collection"
|
||||
URL = "https://selfservice.mybfc.bracknell-forest.gov.uk"
|
||||
TEST_CASES = {
|
||||
@@ -13,7 +13,8 @@ TEST_CASES = {
|
||||
"32 Ashbourne": {"house_number": "32", "post_code": "RG12 8SG"},
|
||||
"1 Acacia Avenue": {"house_number": "1", "post_code": "GU47 0RU"},
|
||||
}
|
||||
ICONS = {
|
||||
|
||||
ICON_MAP = {
|
||||
"General Waste": "mdi:trash-can",
|
||||
"Recycling": "mdi:recycle",
|
||||
"Garden": "mdi:leaf",
|
||||
@@ -68,7 +69,7 @@ class Source:
|
||||
collection_lookup.raise_for_status()
|
||||
collections = collection_lookup.json()["response"]["collections"]
|
||||
entries = []
|
||||
for waste_type in ICONS.keys():
|
||||
for waste_type in ICON_MAP.keys():
|
||||
try:
|
||||
entries.append(
|
||||
Collection(
|
||||
@@ -78,7 +79,7 @@ class Source:
|
||||
]["date"]
|
||||
).date(),
|
||||
t=waste_type,
|
||||
icon=ICONS[waste_type],
|
||||
icon=ICON_MAP[waste_type],
|
||||
)
|
||||
)
|
||||
except (StopIteration, TypeError):
|
||||
|
||||
@@ -11,18 +11,19 @@ import http.client as http_client
|
||||
import ssl
|
||||
import urllib3
|
||||
|
||||
TITLE = "Bradford.gov.uk"
|
||||
TITLE = "Bradford Metropolitan District Council"
|
||||
DESCRIPTION = (
|
||||
"Source for Bradford.gov.uk services for Bradford Metropolitan Council, UK."
|
||||
)
|
||||
URL = "https://onlineforms.bradford.gov.uk/ufs/"
|
||||
URL = "https://bradford.gov.uk"
|
||||
TEST_CASES = {
|
||||
"Ilkley": {"uprn": "100051250665"},
|
||||
"Bradford": {"uprn": "100051239296"},
|
||||
"Baildon": {"uprn": "10002329242"},
|
||||
}
|
||||
|
||||
ICONS = {
|
||||
API_URL = "https://onlineforms.bradford.gov.uk/ufs/"
|
||||
ICON_MAP = {
|
||||
"REFUSE": "mdi:trash-can",
|
||||
"RECYCLING": "mdi:recycle",
|
||||
"GARDEN": "mdi:leaf",
|
||||
@@ -59,7 +60,7 @@ class Source:
|
||||
s.cookies.set(
|
||||
"COLLECTIONDATES", self._uprn, domain="onlineforms.bradford.gov.uk"
|
||||
)
|
||||
r = s.get(f"{URL}/collectiondates.eb")
|
||||
r = s.get(f"{API_URL}/collectiondates.eb")
|
||||
|
||||
soup = BeautifulSoup(r.text, features="html.parser")
|
||||
div = soup.find_all("table", {"role": "region"})
|
||||
@@ -87,7 +88,7 @@ class Source:
|
||||
entry.text.strip(), "%a %b %d %Y"
|
||||
).date(),
|
||||
t=type,
|
||||
icon=ICONS[type],
|
||||
icon=ICON_MAP[type],
|
||||
)
|
||||
)
|
||||
except ValueError:
|
||||
|
||||
@@ -3,7 +3,7 @@ from bs4 import BeautifulSoup
|
||||
from dateutil import parser
|
||||
from waste_collection_schedule import Collection
|
||||
|
||||
TITLE = "braintree.gov.uk"
|
||||
TITLE = "Braintree District Council"
|
||||
DESCRIPTION = "Braintree District Council, UK - Waste Collection"
|
||||
URL = "https://www.braintree.gov.uk"
|
||||
TEST_CASES = {
|
||||
@@ -13,7 +13,7 @@ TEST_CASES = {
|
||||
"20 Peel Crescent": {"house_number": "20", "post_code": "CM7 2RS"},
|
||||
}
|
||||
|
||||
ICONS = {
|
||||
ICON_MAP = {
|
||||
"Grey Bin": "mdi:trash-can",
|
||||
"Clear Sack": "mdi:recycle",
|
||||
"Green Bin": "mdi:leaf",
|
||||
@@ -51,7 +51,7 @@ class Source:
|
||||
Collection(
|
||||
date=parser.parse(collection_date).date(),
|
||||
t=collection_type,
|
||||
icon=ICONS[collection_type]
|
||||
icon=ICON_MAP[collection_type]
|
||||
)
|
||||
)
|
||||
except (StopIteration, TypeError):
|
||||
|
||||
@@ -0,0 +1,82 @@
|
||||
import logging
|
||||
from datetime import datetime
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection
|
||||
|
||||
TITLE = "Breckland Council"
|
||||
DESCRIPTION = "Source for breckland.gov.uk"
|
||||
URL = "https://www.breckland.gov.uk/mybreckland"
|
||||
TEST_CASES = {
|
||||
"test1": {"postcode": "IP22 2LJ", "address": "glen travis"},
|
||||
"test2": {"uprn": "10011977093"},
|
||||
}
|
||||
|
||||
API_URL = "https://www.breckland.gov.uk/apiserver/ajaxlibrary"
|
||||
ICON_MAP = {
|
||||
"Refuse Collection Service": "mdi:trash-can",
|
||||
"Recycling Collection Service": "mdi:recycle",
|
||||
"Garden Waste Service": "mdi:leaf",
|
||||
}
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
headers = {"referer": URL}
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, postcode=None, address=None, uprn=None):
|
||||
self._postcode = postcode
|
||||
self._address = address
|
||||
self._uprn = uprn
|
||||
|
||||
if postcode is None and address is None and uprn is None:
|
||||
raise Exception("no attributes - specify postcode and address or just uprn")
|
||||
|
||||
def fetch(self):
|
||||
if self._uprn is None:
|
||||
json_data = {
|
||||
"jsonrpc": "2.0",
|
||||
"id": "",
|
||||
"method": "Breckland.Whitespace.JointWasteAPI.GetSiteIDsByPostcode",
|
||||
"params": {"postcode": self._postcode, "environment": "live"},
|
||||
}
|
||||
|
||||
r = requests.post(API_URL, json=json_data, headers=headers)
|
||||
r.raise_for_status()
|
||||
|
||||
json_response = r.json()
|
||||
|
||||
for key in json_response["result"]:
|
||||
if self._address.lower() in key["name"].lower():
|
||||
self._uprn = key["uprn"]
|
||||
|
||||
if self._uprn is None:
|
||||
raise Exception("Error querying calendar data")
|
||||
|
||||
json_data = {
|
||||
"jsonrpc": "2.0",
|
||||
"id": "",
|
||||
"method": "Breckland.Whitespace.JointWasteAPI.GetBinCollectionsByUprn",
|
||||
"params": {"uprn": self._uprn, "environment": "live"},
|
||||
}
|
||||
|
||||
r = requests.post(API_URL, json=json_data, headers=headers)
|
||||
r.raise_for_status()
|
||||
|
||||
waste = r.json()["result"]
|
||||
|
||||
entries = []
|
||||
|
||||
for data in waste:
|
||||
entries.append(
|
||||
Collection(
|
||||
datetime.strptime(
|
||||
data["nextcollection"], "%d/%m/%Y %H:%M:%S"
|
||||
).date(),
|
||||
data["collectiontype"],
|
||||
ICON_MAP.get(data["collectiontype"]),
|
||||
)
|
||||
)
|
||||
|
||||
return entries
|
||||
@@ -6,7 +6,7 @@ from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "Brisbane City Council"
|
||||
DESCRIPTION = "Source for Brisbane City Council rubbish collection."
|
||||
URL = "https://www.brisbane.qld.gov.au/clean-and-green/rubbish-tips-and-bins/rubbish-collections/bin-collection-calendar"
|
||||
URL = "https://www.brisbane.qld.gov.au"
|
||||
TEST_CASES = {
|
||||
"Suburban Social": {
|
||||
"suburb": "Chapel Hill",
|
||||
|
||||
@@ -4,7 +4,7 @@ import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "BSR"
|
||||
TITLE = "Berliner Stadtreinigungsbetriebe"
|
||||
DESCRIPTION = "Source for Berliner Stadtreinigungsbetriebe waste collection."
|
||||
URL = "https://bsr.de"
|
||||
TEST_CASES = {
|
||||
|
||||
@@ -0,0 +1,299 @@
|
||||
import datetime
|
||||
import re
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Literal, Optional, TypedDict, Union
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "Bürgerportal"
|
||||
URL = "https://www.c-trace.de"
|
||||
DESCRIPTION = "Source for waste collection in multiple service areas."
|
||||
|
||||
|
||||
def EXTRA_INFO():
|
||||
return [{"title": s["title"], "url": s["url"]} for s in SERVICE_MAP]
|
||||
|
||||
|
||||
TEST_CASES = {
|
||||
"Cochem-Zell": {
|
||||
"operator": "cochem_zell",
|
||||
"district": "Bullay",
|
||||
"subdistrict": "Bullay",
|
||||
"street": "Layenweg",
|
||||
"number": 3,
|
||||
},
|
||||
"Alb-Donau": {
|
||||
"operator": "alb_donau",
|
||||
"district": "Blaubeuren",
|
||||
"street": "Alberstraße",
|
||||
"number": 3,
|
||||
},
|
||||
"Biedenkopf": {
|
||||
"operator": "biedenkopf",
|
||||
"district": "Biedenkopf",
|
||||
"subdistrict": "Breidenstein",
|
||||
"street": "Auf dem Hammer",
|
||||
"number": 1,
|
||||
},
|
||||
}
|
||||
|
||||
ICON_MAP = {
|
||||
"mobil": "mdi:truck",
|
||||
"bio": "mdi:leaf",
|
||||
"papier": "mdi:package-variant",
|
||||
"verpackung": "mdi:recycle",
|
||||
"gelb": "mdi:recycle",
|
||||
"lvp": "mdi:recycle",
|
||||
"rest": "mdi:trash-can",
|
||||
"gruen": "mdi:forest",
|
||||
"grün": "mdi:forest",
|
||||
"baum": "mdi:forest",
|
||||
"schnitt": "mdi:forest",
|
||||
"schad": "mdi:biohazard",
|
||||
}
|
||||
API_HEADERS = {
|
||||
"Accept": "application/json, text/plain;q=0.5",
|
||||
"Cache-Control": "no-cache",
|
||||
}
|
||||
Operator = Literal["cochem_zell", "alb_donau", "biedenkopf"]
|
||||
|
||||
SERVICE_MAP = [
|
||||
{
|
||||
"title": "KV Cochem-Zell",
|
||||
"url": "https://www.cochem-zell-online.de/",
|
||||
"api_url": "https://buerger-portal-cochemzell.azurewebsites.net/api",
|
||||
"operator": "cochem_zell",
|
||||
},
|
||||
{
|
||||
"title": "Abfallwirtschaft Alb-Donau-Kreis",
|
||||
"url": "https://www.aw-adk.de/",
|
||||
"api_url": "https://buerger-portal-albdonaukreisabfallwirtschaft.azurewebsites.net/api",
|
||||
"operator": "alb_donau",
|
||||
},
|
||||
{
|
||||
"title": "MZV Bidenkopf",
|
||||
"url": "https://mzv-biedenkopf.de/",
|
||||
"api_url": "https://biedenkopfmzv.buergerportal.digital/api",
|
||||
"operator": "biedenkopf",
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
# This datalcass is used for adding entries to a set and remove duplicate entries.
|
||||
# The default `Collection` extends the standard dict and thus is not hashable.
|
||||
@dataclass(frozen=True, eq=True)
|
||||
class CollectionEntry:
|
||||
date: datetime.date
|
||||
waste_type: str
|
||||
icon: Optional[str]
|
||||
|
||||
def export(self) -> Collection:
|
||||
return Collection(self.date, self.waste_type, self.icon)
|
||||
|
||||
|
||||
def quote_none(value: Optional[str]) -> str:
|
||||
if value is None:
|
||||
return "null"
|
||||
|
||||
return f"'{value}'"
|
||||
|
||||
|
||||
def get_api_map():
|
||||
return {s["operator"]: s["api_url"] for s in SERVICE_MAP}
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(
|
||||
self,
|
||||
operator: Operator,
|
||||
district: str,
|
||||
street: str,
|
||||
subdistrict: Optional[str] = None,
|
||||
number: Union[int, str, None] = None,
|
||||
show_volume: bool = False,
|
||||
):
|
||||
self.api_url = get_api_map()[operator]
|
||||
self.district = district
|
||||
self.subdistrict = subdistrict
|
||||
self.street = street
|
||||
self.number = number
|
||||
self.show_volume = show_volume
|
||||
|
||||
def fetch(self) -> list[Collection]:
|
||||
session = requests.session()
|
||||
session.headers.update(API_HEADERS)
|
||||
|
||||
year = datetime.datetime.now().year
|
||||
entries: set[CollectionEntry] = set()
|
||||
|
||||
district_id = self.fetch_district_id(session)
|
||||
street_id = self.fetch_street_id(session, district_id)
|
||||
# Eventually verify house number in the future
|
||||
|
||||
params = {
|
||||
"$expand": "Abfuhrplan,Abfuhrplan/GefaesstarifArt/Abfallart,Abfuhrplan/GefaesstarifArt/Volumen",
|
||||
"$orderby": "Abfuhrplan/GefaesstarifArt/Abfallart/Name,Abfuhrplan/GefaesstarifArt/Volumen/VolumenWert",
|
||||
"orteId": district_id,
|
||||
"strassenId": street_id,
|
||||
"jahr": year,
|
||||
}
|
||||
|
||||
if self.number:
|
||||
params["hausNr"] = (f"'{self.number}'",)
|
||||
|
||||
res = session.get(
|
||||
f"{self.api_url}/AbfuhrtermineAbJahr",
|
||||
params=params,
|
||||
)
|
||||
res.raise_for_status()
|
||||
payload: CollectionsRes = res.json()
|
||||
|
||||
date_regex = re.compile(r"\d+")
|
||||
|
||||
for collection in payload["d"]:
|
||||
if date_match := re.search(date_regex, collection["Termin"]):
|
||||
timestamp = float(date_match.group())
|
||||
date = datetime.datetime.utcfromtimestamp(timestamp / 1000).date()
|
||||
waste_type = collection["Abfuhrplan"]["GefaesstarifArt"]["Abfallart"][
|
||||
"Name"
|
||||
]
|
||||
icon = None
|
||||
|
||||
for icon_type, tested_icon in ICON_MAP.items():
|
||||
if icon_type.lower() in waste_type.lower():
|
||||
icon = tested_icon
|
||||
|
||||
if self.show_volume:
|
||||
volume = int(
|
||||
collection["Abfuhrplan"]["GefaesstarifArt"]["Volumen"][
|
||||
"VolumenWert"
|
||||
]
|
||||
)
|
||||
waste_type = f"{waste_type} ({volume} l)"
|
||||
|
||||
entries.add(CollectionEntry(date, waste_type, icon))
|
||||
|
||||
if len(entries) == 0:
|
||||
raise ValueError(
|
||||
"No collections found! Please verify that your configuration is correct."
|
||||
)
|
||||
|
||||
return [entry.export() for entry in entries]
|
||||
|
||||
def fetch_district_id(self, session: requests.Session) -> int:
|
||||
res = session.get(
|
||||
f"{self.api_url}/OrteMitOrtsteilen",
|
||||
headers=API_HEADERS,
|
||||
)
|
||||
res.raise_for_status()
|
||||
payload: DistrictsRes = res.json()
|
||||
|
||||
try:
|
||||
return next(
|
||||
entry["OrteId"]
|
||||
for entry in payload["d"]
|
||||
if entry["Ortsname"] == self.district
|
||||
and entry["Ortsteilname"] == self.subdistrict
|
||||
)
|
||||
except StopIteration:
|
||||
raise ValueError(
|
||||
"District id cannot be fetched. "
|
||||
"Please make sure that you entered a subdistrict if there is a comma on the website."
|
||||
)
|
||||
|
||||
def fetch_street_id(self, session: requests.Session, district_id: int):
|
||||
res = session.get(
|
||||
f"{self.api_url}/Strassen",
|
||||
params={
|
||||
"$filter": f"Ort/OrteId eq {district_id} and OrtsteilName eq {quote_none(self.subdistrict)}",
|
||||
"$orderby": "Name asc",
|
||||
},
|
||||
headers=API_HEADERS,
|
||||
)
|
||||
res.raise_for_status()
|
||||
payload: StreetsRes = res.json()
|
||||
|
||||
try:
|
||||
return next(
|
||||
entry["StrassenId"]
|
||||
for entry in payload["d"]
|
||||
if entry["Name"] == self.street
|
||||
)
|
||||
except StopIteration:
|
||||
raise ValueError(
|
||||
"Street ID cannot be fetched. Please verify your configuration."
|
||||
)
|
||||
|
||||
|
||||
# Typed dictionaries for the API
|
||||
# Automatically generated using https://pytyper.dev/
|
||||
|
||||
|
||||
class DistrictRes(TypedDict):
|
||||
OrteId: int
|
||||
Ortsname: str
|
||||
Ortsteilname: Optional[str]
|
||||
|
||||
|
||||
class DistrictsRes(TypedDict):
|
||||
d: List[DistrictRes]
|
||||
|
||||
|
||||
class StreetRes(TypedDict):
|
||||
StrassenId: int
|
||||
Name: str
|
||||
Plz: str
|
||||
|
||||
|
||||
class StreetsRes(TypedDict):
|
||||
d: List[StreetRes]
|
||||
|
||||
|
||||
class Capacity(TypedDict):
|
||||
VolumenId: int
|
||||
VolumenWert: str
|
||||
|
||||
|
||||
class WasteType(TypedDict):
|
||||
AbfallartenId: int
|
||||
Code: str
|
||||
Name: str
|
||||
Farbe: str
|
||||
IsBio: bool
|
||||
IsPapier: bool
|
||||
IsRest: bool
|
||||
IsWertstoff: bool
|
||||
Bemerkung: None
|
||||
Aktiv: None
|
||||
IsSchadstoff: None
|
||||
|
||||
|
||||
class ContainerType(TypedDict):
|
||||
GefaesstarifArtenId: int
|
||||
BescheidText: None
|
||||
BescheidTextLeerungsgebuehr: None
|
||||
Bezeichnung: str
|
||||
GefaesstarifArtVerwenden: bool
|
||||
GefaesstarifArtVerwendenAbfallkalender: bool
|
||||
Bemerkung: None
|
||||
Volumen: Capacity
|
||||
Abfallart: WasteType
|
||||
# Abfuhrrhythmus: Abfuhrrhythmus
|
||||
|
||||
|
||||
class CollectionPlan(TypedDict):
|
||||
AbfuhrplaeneId: int
|
||||
Jahr: int
|
||||
GefaesstarifArt: ContainerType
|
||||
# AbfallartenObj: Abfuhrrhythmus
|
||||
|
||||
|
||||
class CollectionRes(TypedDict):
|
||||
AbfuhrtermineId: int
|
||||
Termin: str
|
||||
Abfuhrplan: CollectionPlan
|
||||
|
||||
|
||||
class CollectionsRes(TypedDict):
|
||||
d: List[CollectionRes]
|
||||
@@ -2,9 +2,23 @@ import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "C-Trace.de"
|
||||
TITLE = "C-Trace"
|
||||
DESCRIPTION = "Source for C-Trace.de."
|
||||
URL = "https://c-trace.de/"
|
||||
EXTRA_INFO = [
|
||||
{
|
||||
"title": "Bremener Stadreinigung",
|
||||
"url": "https://www.die-bremer-stadtreinigung.de/",
|
||||
},
|
||||
{
|
||||
"title": "AWB Landkreis Augsburg",
|
||||
"url": "https://www.awb-landkreis-augsburg.de/",
|
||||
},
|
||||
{
|
||||
"title": "WZV Kreis Segeberg",
|
||||
"url": "https://www.wzv.de/",
|
||||
},
|
||||
]
|
||||
TEST_CASES = {
|
||||
"Bremen": {"ort": "Bremen", "strasse": "Abbentorstraße", "hausnummer": 5},
|
||||
"AugsburgLand": {
|
||||
|
||||
@@ -4,11 +4,11 @@ from datetime import datetime
|
||||
import requests
|
||||
from waste_collection_schedule import Collection
|
||||
|
||||
TITLE = "Cambridge.gov.uk"
|
||||
TITLE = "Cambridge City Council"
|
||||
DESCRIPTION = (
|
||||
"Source for cambridge.gov.uk services for Cambridge and part of Cambridgeshire"
|
||||
)
|
||||
URL = "cambridge.gov.uk"
|
||||
URL = "https://cambridge.gov.uk"
|
||||
TEST_CASES = {
|
||||
"houseNumber": {"post_code": "CB13JD", "number": 37},
|
||||
"houseName": {"post_code": "cb215hd", "number": "ROSEMARY HOUSE"},
|
||||
@@ -19,7 +19,7 @@ API_URLS = {
|
||||
"collection": "https://servicelayer3c.azure-api.net/wastecalendar/collection/search/{}/",
|
||||
}
|
||||
|
||||
ICONS = {
|
||||
ICON_MAP = {
|
||||
"DOMESTIC": "mdi:trash-can",
|
||||
"RECYCLE": "mdi:recycle",
|
||||
"ORGANIC": "mdi:leaf",
|
||||
@@ -63,7 +63,7 @@ class Source:
|
||||
collection["date"], "%Y-%m-%dT%H:%M:%SZ"
|
||||
).date(),
|
||||
t=round_type.title(),
|
||||
icon=ICONS.get(round_type),
|
||||
icon=ICON_MAP.get(round_type),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "City of Canada Bay Council"
|
||||
DESCRIPTION = "Source for City of Canada Bay Council rubbish collection."
|
||||
URL = "https://www.canadabay.nsw.gov.au/residents/waste-and-recycling/my-bins/my-bin-collection"
|
||||
URL = "https://www.canadabay.nsw.gov.au"
|
||||
TEST_CASES = {
|
||||
"Harry's Shed": {
|
||||
"suburb": "Concord",
|
||||
|
||||
@@ -5,11 +5,11 @@ import json
|
||||
import requests
|
||||
from waste_collection_schedule import Collection
|
||||
|
||||
TITLE = "canterbury.gov.uk"
|
||||
TITLE = "Canterbury City Council"
|
||||
DESCRIPTION = (
|
||||
"Source for canterbury.gov.uk services for canterbury"
|
||||
)
|
||||
URL = "canterbury.gov.uk"
|
||||
URL = "https://canterbury.gov.uk"
|
||||
TEST_CASES = {
|
||||
"houseNumber": {"post_code": "ct68ru", "number": "63"},
|
||||
"houseName": {"post_code": "ct68ru", "number": "KOWLOON"},
|
||||
@@ -20,7 +20,7 @@ API_URLS = {
|
||||
"collection": "https://zbr7r13ke2.execute-api.eu-west-2.amazonaws.com/Beta/get-bin-dates",
|
||||
}
|
||||
|
||||
ICONS = {
|
||||
ICON_MAP = {
|
||||
"General": "mdi:trash-can",
|
||||
"Recycling": "mdi:recycle",
|
||||
"Food": "mdi:food-apple",
|
||||
@@ -77,7 +77,7 @@ class Source:
|
||||
date, "%Y-%m-%dT%H:%M:%S"
|
||||
).date(),
|
||||
t=collection,
|
||||
icon=ICONS.get(collection),
|
||||
icon=ICON_MAP.get(collection),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -3,9 +3,13 @@ import datetime
|
||||
import requests
|
||||
from waste_collection_schedule import Collection
|
||||
|
||||
# Include work around for SSL UNSAFE_LEGACY_RENEGOTIATION_DISABLED error
|
||||
from waste_collection_schedule.service.SSLError import get_legacy_session
|
||||
|
||||
|
||||
TITLE = "Christchurch City Council"
|
||||
DESCRIPTION = "Source for Christchurch City Council."
|
||||
URL = "https://ccc.govt.nz/services/rubbish-and-recycling/collections"
|
||||
URL = "https://ccc.govt.nz"
|
||||
TEST_CASES = {"53 Hereford Street": {"address": "53 Hereford Street"}}
|
||||
|
||||
|
||||
@@ -14,6 +18,9 @@ class Source:
|
||||
self._address = address
|
||||
|
||||
def fetch(self):
|
||||
|
||||
s = get_legacy_session()
|
||||
|
||||
entries = []
|
||||
|
||||
# Find the Rating Unit ID by the physical address
|
||||
@@ -24,9 +31,10 @@ class Source:
|
||||
"crs": "epsg:4326",
|
||||
"limit": 1,
|
||||
}
|
||||
r = requests.get(
|
||||
"https://opendata.ccc.govt.nz/CCCSearch/rest/address/suggest",
|
||||
|
||||
r = s.get("https://opendata.ccc.govt.nz/CCCSearch/rest/address/suggest",
|
||||
params=addressQuery,
|
||||
# verify=False,
|
||||
)
|
||||
address = r.json()
|
||||
|
||||
@@ -35,9 +43,11 @@ class Source:
|
||||
"client_id": "69f433c880c74c349b0128e9fa1b6a93",
|
||||
"client_secret": "139F3D2A83E34AdF98c80566f2eb7212"
|
||||
}
|
||||
r = requests.get(
|
||||
"https://ccc-data-citizen-api-v1-prod.au-s1.cloudhub.io/api/v1/properties/" + str(address[0]["RatingUnitID"]),
|
||||
|
||||
# Updated request using SSL code snippet
|
||||
r = s.get("https://ccc-data-citizen-api-v1-prod.au-s1.cloudhub.io/api/v1/properties/" + str(address[0]["RatingUnitID"]),
|
||||
headers=binsHeaders
|
||||
# verify=False,
|
||||
)
|
||||
bins = r.json()
|
||||
|
||||
|
||||
@@ -4,11 +4,9 @@ import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from waste_collection_schedule import Collection
|
||||
|
||||
TITLE = "cheshireeast.gov.uk"
|
||||
TITLE = "Cheshire East Council"
|
||||
DESCRIPTION = "Source for cheshireeast.gov.uk services for Cheshire East"
|
||||
URL = "cheshireeast.gov.uk"
|
||||
|
||||
|
||||
URL = "https://cheshireeast.gov.uk"
|
||||
TEST_CASES = {
|
||||
"houseUPRN": {"uprn": "100010132071"},
|
||||
"houseAddress": {"postcode": "WA16 0AY", "name_number": "1"},
|
||||
@@ -43,7 +41,7 @@ class Source:
|
||||
r.raise_for_status()
|
||||
soup = BeautifulSoup(r.text, features="html.parser")
|
||||
s = soup.find("a", attrs={"class": "get-job-details"})
|
||||
print(s)
|
||||
|
||||
if s is None:
|
||||
raise Exception("address not found")
|
||||
self._uprn = s["data-uprn"]
|
||||
|
||||
@@ -14,12 +14,8 @@ import urllib3
|
||||
urllib3.disable_warnings()
|
||||
|
||||
|
||||
TITLE = "chesterfield.gov.uk"
|
||||
|
||||
DESCRIPTION = (
|
||||
"Source for waste collection services for Chesterfield Borough Council"
|
||||
)
|
||||
|
||||
TITLE = "Chesterfield Borough Council"
|
||||
DESCRIPTION = "Source for waste collection services for Chesterfield Borough Council"
|
||||
URL = "https://www.chesterfield.gov.uk/"
|
||||
|
||||
HEADERS = {
|
||||
@@ -33,13 +29,13 @@ TEST_CASES = {
|
||||
"Test_004": {"uprn": "74020930"},
|
||||
}
|
||||
|
||||
ICONS = {
|
||||
ICON_MAP = {
|
||||
"DOMESTIC REFUSE": "mdi:trash-can",
|
||||
"DOMESTIC RECYCLING": "mdi:recycle",
|
||||
"DOMESTIC ORGANIC": "mdi:leaf",
|
||||
}
|
||||
|
||||
APIS = {
|
||||
API_URLS = {
|
||||
"session": "https://www.chesterfield.gov.uk/bins-and-recycling/bin-collections/check-bin-collections.aspx",
|
||||
"fwuid": "https://myaccount.chesterfield.gov.uk/anonymous/c/cbc_VE_CollectionDaysLO.app?aura.format=JSON&aura.formatAdapter=LIGHTNING_OUT",
|
||||
"search": "https://myaccount.chesterfield.gov.uk/anonymous/aura?r=2&aura.ApexAction.execute=1",
|
||||
@@ -57,13 +53,13 @@ class Source:
|
||||
|
||||
s = requests.Session()
|
||||
r = s.get(
|
||||
APIS["session"],
|
||||
API_URLS["session"],
|
||||
headers=HEADERS,
|
||||
)
|
||||
|
||||
# Capture fwuid value
|
||||
r = s.get(
|
||||
APIS["fwuid"],
|
||||
API_URLS["fwuid"],
|
||||
verify=False,
|
||||
headers=HEADERS,
|
||||
)
|
||||
@@ -83,7 +79,7 @@ class Source:
|
||||
"aura.token": "null",
|
||||
}
|
||||
r = s.post(
|
||||
APIS["search"],
|
||||
API_URLS["search"],
|
||||
data=payload,
|
||||
verify=False,
|
||||
headers=HEADERS,
|
||||
@@ -108,7 +104,7 @@ class Source:
|
||||
Collection(
|
||||
date=dt_local.date(),
|
||||
t=waste_type,
|
||||
icon=ICONS.get(waste_type.upper()),
|
||||
icon=ICON_MAP.get(waste_type.upper()),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -1,69 +0,0 @@
|
||||
import contextlib
|
||||
from datetime import datetime
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "Abfall Cochem-Zell"
|
||||
DESCRIPTION = "Source for waste collection in district Cochem-Zell."
|
||||
URL = "https://www.cochem-zell-online.de/abfallkalender/"
|
||||
TEST_CASES = {
|
||||
"Alf": {"district": "Alf"},
|
||||
"Bullay": {"district": "Bullay"},
|
||||
"Zell-Stadt": {"district": "Zell-Stadt"},
|
||||
"Pünderich": {"district": "Pünderich"},
|
||||
}
|
||||
|
||||
API_URL = "https://abfallkalender10.app.moba.de/Cochem_Zell/api"
|
||||
REMINDER_DAY = 0 # The calendar event should be on the same day as the waste collection
|
||||
REMINDER_HOUR = 6 # The calendar event should start on any hour of the correct day, so this does not matter much
|
||||
FILENAME = "Abfallkalender.ics"
|
||||
ICON_MAP = {
|
||||
"Biotonne": "mdi:leaf",
|
||||
"Gruengut": "mdi:forest",
|
||||
"Papierabfall": "mdi:package-variant",
|
||||
"Restmülltonne": "mdi:trash-can",
|
||||
"Umweltmobil": "mdi:truck",
|
||||
"Verpackungsabfall": "mdi:recycle",
|
||||
}
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, district: str):
|
||||
self._district = district
|
||||
self._ics = ICS()
|
||||
|
||||
def fetch(self):
|
||||
now = datetime.now()
|
||||
entries = self._fetch_year(now.year)
|
||||
|
||||
if now.month == 12:
|
||||
# also get data for next year if we are already in december
|
||||
with contextlib.suppress(Exception):
|
||||
entries.extend(self._fetch_year(now.year + 1))
|
||||
|
||||
return entries
|
||||
|
||||
def _fetch_year(self, year: int):
|
||||
url = "/".join(
|
||||
str(param)
|
||||
for param in (
|
||||
API_URL,
|
||||
self._district,
|
||||
year,
|
||||
REMINDER_DAY,
|
||||
REMINDER_HOUR,
|
||||
FILENAME,
|
||||
)
|
||||
)
|
||||
|
||||
r = requests.get(url)
|
||||
schedule = self._ics.convert(r.text)
|
||||
|
||||
return [
|
||||
Collection(
|
||||
date=entry[0], t=entry[1], icon=ICON_MAP.get(entry[1], "mdi:trash-can")
|
||||
)
|
||||
for entry in schedule
|
||||
]
|
||||
@@ -4,7 +4,7 @@ from datetime import datetime, timedelta
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "Colchester.gov.uk"
|
||||
TITLE = "Colchester Borough Council"
|
||||
DESCRIPTION = "Source for Colchester.gov.uk services for the borough of Colchester, UK."
|
||||
URL = "https://colchester.gov.uk"
|
||||
TEST_CASES = {
|
||||
@@ -13,7 +13,7 @@ TEST_CASES = {
|
||||
"The Lane, Colchester": {"llpgid": "7cd96a3d-6027-e711-80fa-5065f38b56d1"},
|
||||
}
|
||||
|
||||
ICONS = {
|
||||
ICON_MAP = {
|
||||
"Black bags": "mdi:trash-can",
|
||||
"Glass": "mdi:glass-fragile",
|
||||
"Cans": "mdi:trash-can",
|
||||
@@ -63,7 +63,7 @@ class Source:
|
||||
Collection(
|
||||
date=date.date(),
|
||||
t=day["Name"].title(),
|
||||
icon=ICONS[day["Name"]],
|
||||
icon=ICON_MAP[day["Name"]],
|
||||
)
|
||||
)
|
||||
# As Colchester.gov.uk only provides the current collection cycle, the next must be extrapolated
|
||||
@@ -73,7 +73,7 @@ class Source:
|
||||
Collection(
|
||||
date=date.date() + timedelta(days=14),
|
||||
t=day["Name"].title(),
|
||||
icon=ICONS[day["Name"]],
|
||||
icon=ICON_MAP[day["Name"]],
|
||||
)
|
||||
)
|
||||
except ValueError:
|
||||
|
||||
@@ -4,13 +4,14 @@ import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from waste_collection_schedule import Collection
|
||||
|
||||
TITLE = "Cornwall Council, UK"
|
||||
TITLE = "Cornwall Council"
|
||||
DESCRIPTION = "Source for cornwall.gov.uk services for Cornwall Council"
|
||||
URL = "cornwall.gov.uk"
|
||||
URL = "https://cornwall.gov.uk"
|
||||
TEST_CASES = {
|
||||
"known_uprn": {"uprn": "100040118005"},
|
||||
"unknown_uprn": {"postcode": "TR261SP", "housenumberorname": "7"},
|
||||
}
|
||||
|
||||
SEARCH_URLS = {
|
||||
"uprn_search": "https://www.cornwall.gov.uk/my-area/",
|
||||
"collection_search": "https://www.cornwall.gov.uk/umbraco/Surface/Waste/MyCollectionDays?subscribe=False",
|
||||
|
||||
@@ -1,20 +1,26 @@
|
||||
import logging
|
||||
import requests
|
||||
from datetime import datetime
|
||||
from xml.dom.minidom import parseString
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "UMWELTPROFIS"
|
||||
TITLE = "Umweltprofis"
|
||||
DESCRIPTION = "Source for Umweltprofis"
|
||||
URL = "https://www.umweltprofis.at"
|
||||
TEST_CASES = {
|
||||
"Ebensee": {"url": "https://data.umweltprofis.at/OpenData/AppointmentService/AppointmentService.asmx/GetIcalWastePickupCalendar?key=KXX_K0bIXDdk0NrTkk3xWqLM9-bsNgIVBE6FMXDObTqxmp9S39nIqwhf9LTIAX9shrlpfCYU7TG_8pS9NjkAJnM_ruQ1SYm3V9YXVRfLRws1"},
|
||||
"Rohrbach": {"xmlurl": "https://data.umweltprofis.at/opendata/AppointmentService/AppointmentService.asmx/GetTermineForLocationSecured?Key=TEMPKeyabvvMKVCic0cMcmsTEMPKey&StreetNr=118213&HouseNr=Alle&intervall=Alle"},
|
||||
"Ebensee": {
|
||||
"url": "https://data.umweltprofis.at/OpenData/AppointmentService/AppointmentService.asmx/GetIcalWastePickupCalendar?key=KXX_K0bIXDdk0NrTkk3xWqLM9-bsNgIVBE6FMXDObTqxmp9S39nIqwhf9LTIAX9shrlpfCYU7TG_8pS9NjkAJnM_ruQ1SYm3V9YXVRfLRws1"
|
||||
},
|
||||
"Rohrbach": {
|
||||
"xmlurl": "https://data.umweltprofis.at/opendata/AppointmentService/AppointmentService.asmx/GetTermineForLocationSecured?Key=TEMPKeyabvvMKVCic0cMcmsTEMPKey&StreetNr=118213&HouseNr=Alle&intervall=Alle"
|
||||
},
|
||||
}
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def getText(element):
|
||||
s = ""
|
||||
for e in element.childNodes:
|
||||
@@ -22,6 +28,7 @@ def getText(element):
|
||||
s += e.nodeValue
|
||||
return s
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, url=None, xmlurl=None):
|
||||
self._url = url
|
||||
@@ -38,11 +45,11 @@ class Source:
|
||||
|
||||
def fetch_ics(self):
|
||||
r = requests.get(self._url)
|
||||
if r.status_code != 200:
|
||||
_LOGGER.error("Error querying calendar data")
|
||||
return []
|
||||
r.raise_for_status()
|
||||
|
||||
fixed_text = r.text.replace("REFRESH - INTERVAL; VALUE = ", "REFRESH-INTERVAL;VALUE=")
|
||||
fixed_text = r.text.replace(
|
||||
"REFRESH - INTERVAL; VALUE = ", "REFRESH-INTERVAL;VALUE="
|
||||
)
|
||||
|
||||
dates = self._ics.convert(fixed_text)
|
||||
|
||||
|
||||
@@ -1,15 +1,14 @@
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from urllib.parse import parse_qs, urlsplit
|
||||
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from urllib.parse import urlsplit, parse_qs
|
||||
import logging
|
||||
|
||||
TITLE = "Derby.gov.uk"
|
||||
TITLE = "Derby City Council"
|
||||
DESCRIPTION = "Source for Derby.gov.uk services for Derby City Council, UK."
|
||||
URL = "https://secure.derby.gov.uk/binday/"
|
||||
URL = "https://derby.gov.uk"
|
||||
TEST_CASES = {
|
||||
# Derby City council wants specific addresses, hopefully these are generic enough.
|
||||
"Community Of The Holy Name, Morley Road, Derby, DE21 4TB": {
|
||||
@@ -21,7 +20,7 @@ TEST_CASES = {
|
||||
},
|
||||
}
|
||||
|
||||
ICONS = {
|
||||
ICON_MAP = {
|
||||
"Black bin": "mdi:trash-can",
|
||||
"Blue bin": "mdi:recycle",
|
||||
"Brown bin": "mdi:leaf",
|
||||
@@ -38,7 +37,7 @@ class Source:
|
||||
self._post_code = post_code
|
||||
self._house_number = house_number
|
||||
if not any([self._premises_id, self._post_code and self._house_number]):
|
||||
_LOGGER.error(
|
||||
raise Exception(
|
||||
"premises_id or post_code and house number must be provided in config"
|
||||
)
|
||||
self._session = requests.Session()
|
||||
@@ -75,7 +74,7 @@ class Source:
|
||||
try:
|
||||
date = datetime.strptime(date.text, "%A, %d %B %Y:").date()
|
||||
except ValueError:
|
||||
_LOGGER.error(f"Skipped {date} as it does not match time format")
|
||||
_LOGGER.info(f"Skipped {date} as it does not match time format")
|
||||
continue
|
||||
img_tag = result.find("img")
|
||||
collection_type = img_tag["alt"]
|
||||
@@ -83,7 +82,7 @@ class Source:
|
||||
Collection(
|
||||
date=date,
|
||||
t=collection_type,
|
||||
icon=ICONS[collection_type],
|
||||
icon=ICON_MAP[collection_type],
|
||||
)
|
||||
)
|
||||
return entries
|
||||
|
||||
@@ -3,8 +3,9 @@ from ..collection import Collection
|
||||
|
||||
from ..service.EcoHarmonogramPL import Ecoharmonogram
|
||||
|
||||
TITLE = "Ecoharmonogram"
|
||||
DESCRIPTION = "Source for ecoharmonogram.pl"
|
||||
URL = "ecoharmonogram.pl"
|
||||
URL = "https://ecoharmonogram.pl"
|
||||
TEST_CASES = {
|
||||
"Simple test case": {"town": "Krzeszowice", "street": "Wyki", "house_number": ""},
|
||||
"Sides multi test case": {"town": "Częstochowa", "street": "Boczna", "additional_sides_matcher": "wie"},
|
||||
@@ -14,7 +15,6 @@ TEST_CASES = {
|
||||
"additional_sides_matcher": "Wielorodzinna - powyżej 7 lokali"},
|
||||
"Simple test with community": {"town": "Gdańsk", "street": "Jabłoniowa", "house_number": "55", "additional_sides_matcher": "", "community": "108" },
|
||||
}
|
||||
TITLE = "ecoharmonogram.pl"
|
||||
|
||||
|
||||
class Source:
|
||||
|
||||
@@ -7,7 +7,7 @@ from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "EGN Abfallkalender"
|
||||
DESCRIPTION = "Source for EGN Abfallkalender"
|
||||
URL = "https://www.egn-abfallkalender.de/kalender"
|
||||
URL = "https://www.egn-abfallkalender.de"
|
||||
TEST_CASES = {
|
||||
"Grevenbroich": {
|
||||
"city": "Grevenbroich",
|
||||
@@ -31,7 +31,8 @@ TEST_CASES = {
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
IconMap = {
|
||||
API_URL = "https://www.egn-abfallkalender.de/kalender"
|
||||
ICON_MAP = {
|
||||
"Grau": "mdi:trash-can",
|
||||
"Gelb": "mdi:sack",
|
||||
"Blau": "mdi:package-variant",
|
||||
@@ -48,7 +49,7 @@ class Source:
|
||||
|
||||
def fetch(self):
|
||||
s = requests.session()
|
||||
r = s.get(URL)
|
||||
r = s.get(API_URL)
|
||||
|
||||
soup = BeautifulSoup(r.text, features="html.parser")
|
||||
tag = soup.find("meta", {"name": "csrf-token"})
|
||||
@@ -62,14 +63,19 @@ class Source:
|
||||
"street": self._street,
|
||||
"street_number": self._housenumber,
|
||||
}
|
||||
r = s.post(URL, data=post_data, headers=headers)
|
||||
r = s.post(API_URL, data=post_data, headers=headers)
|
||||
|
||||
data = r.json()
|
||||
|
||||
if data.get("error"):
|
||||
for type, errormsg in data["errors"].items():
|
||||
_LOGGER.error(f"{type} - {errormsg}")
|
||||
return []
|
||||
raise Exception(
|
||||
"\n".join(
|
||||
[
|
||||
f"{type} - {errormsg}"
|
||||
for type, errormsg in data["errors"].items()
|
||||
]
|
||||
)
|
||||
)
|
||||
|
||||
entries = []
|
||||
for year, months in data["waste_discharge"].items():
|
||||
@@ -85,7 +91,7 @@ class Source:
|
||||
.capitalize()
|
||||
)
|
||||
entries.append(
|
||||
Collection(date=date, t=color, icon=IconMap.get(color))
|
||||
Collection(date=date, t=color, icon=ICON_MAP.get(color))
|
||||
)
|
||||
|
||||
return entries
|
||||
|
||||
@@ -1,23 +1,15 @@
|
||||
import logging
|
||||
import requests
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from waste_collection_schedule import Collection
|
||||
|
||||
TITLE = 'elmbridge.gov.uk'
|
||||
DESCRIPTION = (
|
||||
'Source for waste collection services for Elmbridge Borough Council'
|
||||
)
|
||||
URL = 'https://www.elmbridge.gov.uk/waste-and-recycling/'
|
||||
|
||||
|
||||
HEADERS = {
|
||||
"user-agent": "Mozilla/5.0",
|
||||
}
|
||||
|
||||
TITLE = "Elmbridge Borough Council"
|
||||
DESCRIPTION = "Source for waste collection services for Elmbridge Borough Council"
|
||||
URL = "https://www.elmbridge.gov.uk"
|
||||
TEST_CASES = {
|
||||
"Test_001" : {"uprn": 10013119164},
|
||||
"Test_001": {"uprn": 10013119164},
|
||||
"Test_002": {"uprn": "100061309206"},
|
||||
"Test_003": {"uprn": 100062119825},
|
||||
"Test_004": {"uprn": "100061343923"},
|
||||
@@ -25,28 +17,32 @@ TEST_CASES = {
|
||||
}
|
||||
|
||||
API_URLS = {
|
||||
'session': 'https://emaps.elmbridge.gov.uk/myElmbridge.aspx',
|
||||
'search': 'https://emaps.elmbridge.gov.uk/myElmbridge.aspx?action=SetAddress&UniqueId={}',
|
||||
'schedule': 'https://emaps.elmbridge.gov.uk/myElmbridge.aspx?tab=0#Refuse_&_Recycling',
|
||||
"session": "https://emaps.elmbridge.gov.uk/myElmbridge.aspx",
|
||||
"search": "https://emaps.elmbridge.gov.uk/myElmbridge.aspx?action=SetAddress&UniqueId={}",
|
||||
"schedule": "https://emaps.elmbridge.gov.uk/myElmbridge.aspx?tab=0#Refuse_&_Recycling",
|
||||
}
|
||||
|
||||
OFFSETS = {
|
||||
'Monday': 0,
|
||||
'Tuesday': 1,
|
||||
'Wednesday': 2,
|
||||
'Thursday': 3,
|
||||
'Friday': 4,
|
||||
'Saturday': 5,
|
||||
'Sunday': 6,
|
||||
"Monday": 0,
|
||||
"Tuesday": 1,
|
||||
"Wednesday": 2,
|
||||
"Thursday": 3,
|
||||
"Friday": 4,
|
||||
"Saturday": 5,
|
||||
"Sunday": 6,
|
||||
}
|
||||
|
||||
ICONS = {
|
||||
ICON_MAP = {
|
||||
"REFUSE": "mdi:trash-can",
|
||||
"RECYCLING": "mdi:recycle",
|
||||
"FOOD": "mdi:food",
|
||||
"GARDEN": "mdi:leaf",
|
||||
}
|
||||
|
||||
HEADERS = {
|
||||
"user-agent": "Mozilla/5.0",
|
||||
}
|
||||
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
@@ -61,63 +57,65 @@ class Source:
|
||||
# This script assumes the week-commencing dates are for the current year.
|
||||
# This'll cause problems in December as upcoming January collections will have been assigned dates in the past.
|
||||
# Some clunky logic can deal with this:
|
||||
# If a date in less than 1 month in the past, it doesn't matter as the collection will have recently occured.
|
||||
# If a date in less than 1 month in the past, it doesn't matter as the collection will have recently occurred.
|
||||
# If a date is more than 1 month in the past, assume it's an incorrectly assigned date and increment the year by 1.
|
||||
# Once that's been done, offset the week-commencing dates to match day of the week each waste collection type is scheduled.
|
||||
# If you have a better way of doing this, feel free to update via a Pull Request!
|
||||
|
||||
# Get current date and year in format consistent with API result
|
||||
today = datetime.now()
|
||||
today = today.replace(hour = 0, minute = 0, second = 0, microsecond = 0)
|
||||
today = today.replace(hour=0, minute=0, second=0, microsecond=0)
|
||||
year = today.year
|
||||
|
||||
s = requests.Session()
|
||||
|
||||
r0 = s.get(API_URLS['session'], headers=HEADERS)
|
||||
r0 = s.get(API_URLS["session"], headers=HEADERS)
|
||||
r0.raise_for_status()
|
||||
r1 = s.get(API_URLS['search'].format(self._uprn), headers=HEADERS)
|
||||
r1 = s.get(API_URLS["search"].format(self._uprn), headers=HEADERS)
|
||||
r1.raise_for_status()
|
||||
r2 = s.get(API_URLS['schedule'], headers=HEADERS)
|
||||
r2 = s.get(API_URLS["schedule"], headers=HEADERS)
|
||||
r2.raise_for_status()
|
||||
|
||||
responseContent = r2.content
|
||||
soup = BeautifulSoup(responseContent, 'html.parser')
|
||||
soup = BeautifulSoup(responseContent, "html.parser")
|
||||
|
||||
entries = []
|
||||
|
||||
notice = soup.find('div', {'class': 'atPanelContent atFirst atAlt0'})
|
||||
notices = notice.text.replace('\nRefuse and recycling collection days\n', '').split('.')
|
||||
notices.pop(-1) # Remove superflous element
|
||||
frame = soup.find('div', {'class': 'atPanelContent atAlt1 atLast'})
|
||||
table = frame.find('table')
|
||||
notice = soup.find("div", {"class": "atPanelContent atFirst atAlt0"})
|
||||
notices = notice.text.replace(
|
||||
"\nRefuse and recycling collection days\n", ""
|
||||
).split(".")
|
||||
notices.pop(-1) # Remove superfluous element
|
||||
frame = soup.find("div", {"class": "atPanelContent atAlt1 atLast"})
|
||||
table = frame.find("table")
|
||||
|
||||
for tr in table.find_all('tr'):
|
||||
for tr in table.find_all("tr"):
|
||||
row = []
|
||||
for td in tr.find_all('td'):
|
||||
for td in tr.find_all("td"):
|
||||
row.append(td.text.strip())
|
||||
row.pop(1) # removes superflous element
|
||||
dt = row[0] + ' ' + str(year)
|
||||
dt = datetime.strptime(dt, '%d %b %Y')
|
||||
row.pop(1) # removes superfluous element
|
||||
dt = row[0] + " " + str(year)
|
||||
dt = datetime.strptime(dt, "%d %b %Y")
|
||||
|
||||
# Amend year, if necessary
|
||||
if (dt - today) < timedelta(days = -31):
|
||||
dt += timedelta(year = 1)
|
||||
if (dt - today) < timedelta(days=-31):
|
||||
dt = dt.replace(year=dt.year + 1)
|
||||
row[0] = dt
|
||||
|
||||
# Separate out same-day waste collections
|
||||
wastetypes = row[1].split(' + ')
|
||||
wastetypes = row[1].split(" + ")
|
||||
|
||||
# Sort out date offsets for each collection type
|
||||
for waste in wastetypes:
|
||||
for day, offset in OFFSETS.items():
|
||||
for sentence in notices:
|
||||
if (waste in sentence) and (day in sentence):
|
||||
new_date = row[0] + timedelta(days = offset)
|
||||
new_date = row[0] + timedelta(days=offset)
|
||||
entries.append(
|
||||
Collection(
|
||||
date = new_date.date(),
|
||||
t = waste + ' bin',
|
||||
icon = ICONS.get(waste.upper()),
|
||||
date=new_date.date(),
|
||||
t=waste + " bin",
|
||||
icon=ICON_MAP.get(waste.upper()),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -5,24 +5,31 @@ from bs4 import BeautifulSoup
|
||||
from dateutil.parser import parse
|
||||
from waste_collection_schedule import Collection
|
||||
|
||||
TITLE = "environmentfirst.co.uk"
|
||||
|
||||
TITLE = "Environment First"
|
||||
URL = "https://environmentfirst.co.uk"
|
||||
EXTRA_INFO = [
|
||||
{
|
||||
"title": "Eastbourne Borough Council",
|
||||
"url": "https://lewes-eastbourne.gov.uk"
|
||||
},
|
||||
{
|
||||
"title": "Lewes District Council",
|
||||
"url": "https://lewes-eastbourne.gov.uk"
|
||||
},
|
||||
]
|
||||
DESCRIPTION = (
|
||||
"""Consolidated source for waste collection services from:
|
||||
Eastbourne Borough Council
|
||||
Lewes District Council
|
||||
"""
|
||||
)
|
||||
|
||||
URL = "https://environmentfirst.co.uk"
|
||||
|
||||
TEST_CASES = {
|
||||
"houseUPRN" : {"uprn": "100060063421"},
|
||||
"houseNumber": {"post_code": "BN228SG", "number": 3},
|
||||
"houseName": {"post_code": "BN73LG", "number": "Garden Cottage"},
|
||||
}
|
||||
|
||||
ICONS = {
|
||||
ICON_MAP = {
|
||||
"RUBBISH": "mdi:trash-can",
|
||||
"RECYCLING": "mdi:recycle",
|
||||
"GARDEN WASTE": "mdi:leaf",
|
||||
@@ -92,13 +99,13 @@ class Source:
|
||||
x = soup.findAll("p")
|
||||
for i in x[1:-1]: # ignores elements containing address and marketing message
|
||||
if " day " in i.text:
|
||||
for round_type in ICONS:
|
||||
for round_type in ICON_MAP:
|
||||
if round_type in i.text.upper():
|
||||
entries.append(
|
||||
Collection(
|
||||
date = parse(str.split(i.text, ":")[1]).date(),
|
||||
t = round_type,
|
||||
icon = ICONS.get(round_type),
|
||||
icon = ICON_MAP.get(round_type),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -1,17 +1,16 @@
|
||||
import requests
|
||||
import datetime
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
import urllib
|
||||
|
||||
TITLE = "Landkreis Erlangen-Höchstadt"
|
||||
DESCRIPTION = "Source for Landkreis Erlangen-Höchstadt"
|
||||
URL = "https://www.erlangen-hoechstadt.de/"
|
||||
TEST_CASES = {
|
||||
"Höchstadt": {"city": "Höchstadt", "street": "Böhmerwaldstraße"},
|
||||
"Brand": {"city": "Eckental", "street": "Eckenhaid, Amselweg"},
|
||||
"Ortsteile": {"city": "Wachenroth", "street": "Wachenroth Ort ink. aller Ortsteile"}
|
||||
"Ortsteile": {"city": "Wachenroth", "street": "Ort inkl. aller Ortsteile"},
|
||||
}
|
||||
|
||||
|
||||
@@ -22,20 +21,25 @@ class Source:
|
||||
self._ics = ICS(split_at=" / ")
|
||||
|
||||
def fetch(self):
|
||||
city = self._city.upper()
|
||||
street = self._street
|
||||
today = datetime.date.today()
|
||||
year = today.year
|
||||
|
||||
payload = {"ort": city, "strasse": street,
|
||||
"abfallart": "Alle", "jahr": year}
|
||||
r = requests.get(
|
||||
"https://www.erlangen-hoechstadt.de/komx/surface/dfxabfallics/GetAbfallIcs", params=payload
|
||||
)
|
||||
r.encoding = r.apparent_encoding
|
||||
dates = self._ics.convert(r.text)
|
||||
dates = self.fetch_year(today.year)
|
||||
if today.month == 12:
|
||||
dates.extend(self.fetch_year(today.year + 1))
|
||||
|
||||
entries = []
|
||||
for d in dates:
|
||||
entries.append(Collection(d[0], d[1]))
|
||||
return entries
|
||||
|
||||
def fetch_year(self, year):
|
||||
city = self._city.upper()
|
||||
street = self._street
|
||||
|
||||
payload = {"ort": city, "strasse": street, "abfallart": "Alle", "jahr": year}
|
||||
r = requests.get(
|
||||
"https://www.erlangen-hoechstadt.de/komx/surface/dfxabfallics/GetAbfallIcs",
|
||||
params=payload,
|
||||
)
|
||||
r.raise_for_status()
|
||||
r.encoding = "utf-8"
|
||||
return self._ics.convert(r.text)
|
||||
|
||||
@@ -1,74 +1,139 @@
|
||||
import logging
|
||||
import requests
|
||||
from urllib.parse import urlparse
|
||||
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from datetime import datetime
|
||||
from dateutil import parser
|
||||
from waste_collection_schedule import Collection
|
||||
|
||||
TITLE = "fccenvironment.co.uk"
|
||||
|
||||
DESCRIPTION = (
|
||||
"""Consolidated source for waste collection services for ~60 local authorities.
|
||||
TITLE = "FCC Environment"
|
||||
DESCRIPTION = """
|
||||
Consolidated source for waste collection services for ~60 local authorities.
|
||||
Currently supports:
|
||||
Market Harborough
|
||||
West Devon (Generic Provider)
|
||||
South Hams (Generic Provider)
|
||||
Market Harborough (Custom Provider)
|
||||
"""
|
||||
)
|
||||
|
||||
URL = "https://fccenvironment.co.uk"
|
||||
EXTRA_INFO = [
|
||||
{
|
||||
"title": "Harborough District Council",
|
||||
"url": "https://harborough.gov.uk"
|
||||
},
|
||||
{
|
||||
"title": "South Hams District Council",
|
||||
"url": "https://southhams.gov.uk/"
|
||||
},
|
||||
{
|
||||
"title": "West Devon Borough Council",
|
||||
"url": "https://www.westdevon.gov.uk/"
|
||||
},
|
||||
]
|
||||
|
||||
TEST_CASES = {
|
||||
"Test_001" : {"uprn": "100030491624"},
|
||||
"Test_002": {"uprn": "100030491614"},
|
||||
"Test_003": {"uprn": "100030493289"},
|
||||
"Test_004": {"uprn": "200001136341"}
|
||||
"14_LE16_9QX": {"uprn": "100030491624"}, # region ommited to test default values
|
||||
"4_LE16_9QX": {"uprn": "100030491614", "region": "harborough"},
|
||||
"16_LE16_7NA": {"uprn": "100030493289", "region": "harborough"},
|
||||
"10_LE16_8ER": {"uprn": "200001136341", "region": "harborough"},
|
||||
"9_PL20_7SH": {"uprn": "10001326315", "region": "westdevon"},
|
||||
"3_PL20_7RY": {"uprn": "10001326041", "region": "westdevon"},
|
||||
"2_PL21_9BN": {"uprn": "100040279446", "region": "southhams"},
|
||||
"4_SL21_0HZ": {"uprn": "100040281987", "region": "southhams"},
|
||||
}
|
||||
|
||||
|
||||
ICONS = {
|
||||
"NON-RECYCLABLE WASTE BIN COLLECTION": "mdi:trash-can",
|
||||
"RECYCLING COLLECTION": "mdi:recycle",
|
||||
"GARDEN WASTE COLLECTION": "mdi:leaf",
|
||||
ICON_MAP = {
|
||||
"Refuse": "mdi:trash-can",
|
||||
"Recycling": "mdi:recycle",
|
||||
"Garden": "mdi:leaf",
|
||||
}
|
||||
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, uprn=None):
|
||||
self._uprn = uprn
|
||||
def __init__(self, uprn: str, region: str = "harborough") -> None:
|
||||
self.uprn = uprn
|
||||
self.region = region
|
||||
|
||||
def fetch(self):
|
||||
def getcollectiondetails(self, endpoint: str) -> list[Collection]:
|
||||
domain = urlparse(endpoint).netloc
|
||||
session = requests.Session()
|
||||
cookies = session.get(f"https://{domain}/")
|
||||
response = session.post(
|
||||
endpoint,
|
||||
headers={
|
||||
"x-requested-with": "XMLHttpRequest",
|
||||
},
|
||||
data={
|
||||
"fcc_session_token": cookies.cookies["fcc_session_cookie"],
|
||||
"uprn": self.uprn,
|
||||
},
|
||||
)
|
||||
results = {}
|
||||
for item in response.json()["binCollections"]["tile"]:
|
||||
try:
|
||||
soup = BeautifulSoup(item[0], "html.parser")
|
||||
date = parser.parse(soup.find_all("b")[2].text.split(",")[1].strip()).date()
|
||||
service = soup.text.split("\n")[0]
|
||||
except parser._parser.ParserError:
|
||||
continue
|
||||
|
||||
s = requests.Session()
|
||||
|
||||
if self._uprn:
|
||||
# POST request returns schedule for matching uprn
|
||||
payload = {
|
||||
"Uprn": self._uprn
|
||||
}
|
||||
r = s.post("https://www.fccenvironment.co.uk/harborough/detail-address", data = payload)
|
||||
responseContent = r.text
|
||||
"""
|
||||
Handle duplication before creating the list of Collections
|
||||
"""
|
||||
for type in ICON_MAP:
|
||||
if type in service:
|
||||
if type in results.keys():
|
||||
if date < results[type]:
|
||||
results[type] = date
|
||||
else:
|
||||
results[type] = date
|
||||
|
||||
entries = []
|
||||
# Extract waste types and dates from responseContent
|
||||
soup = BeautifulSoup(responseContent, "html.parser")
|
||||
services = soup.find("div", attrs={"class": "blocks block-your-next-scheduled-bin-collection-days"})
|
||||
items = services.find_all("li")
|
||||
for item in items:
|
||||
date_text = item.find("span", attrs={"class": "pull-right"}).text.strip()
|
||||
for result in results:
|
||||
entries.append(
|
||||
Collection(
|
||||
date=results[result],
|
||||
t=result,
|
||||
icon=ICON_MAP[result],
|
||||
)
|
||||
)
|
||||
return entries
|
||||
|
||||
def harborough(self) -> list[Collection]:
|
||||
_icons = {
|
||||
"NON-RECYCLABLE WASTE BIN COLLECTION": "mdi:trash-can",
|
||||
"RECYCLING COLLECTION": "mdi:recycle",
|
||||
"GARDEN WASTE COLLECTION": "mdi:leaf",
|
||||
} # Custom icons to avoid a breaking change
|
||||
r = requests.post("https://www.fccenvironment.co.uk/harborough/detail-address", data={"Uprn": self.uprn})
|
||||
soup = BeautifulSoup(r.text, "html.parser")
|
||||
services = soup.find("div", attrs={"class": "blocks block-your-next-scheduled-bin-collection-days"}).find_all(
|
||||
"li"
|
||||
)
|
||||
entries = []
|
||||
for service in services:
|
||||
for type in _icons:
|
||||
if type.lower() in service.text.lower():
|
||||
try:
|
||||
date = datetime.strptime(date_text, "%d %B %Y").date()
|
||||
except ValueError:
|
||||
date = parser.parse(service.find("span", attrs={"class": "pull-right"}).text.strip()).date()
|
||||
except parser._parser.ParserError:
|
||||
continue
|
||||
else:
|
||||
waste_type = item.text.split(' (')[0]
|
||||
|
||||
entries.append(
|
||||
Collection(
|
||||
date=date,
|
||||
t=waste_type,
|
||||
icon=ICONS.get(waste_type.upper()),
|
||||
t=type,
|
||||
icon=_icons[type.upper()],
|
||||
)
|
||||
)
|
||||
|
||||
return entries
|
||||
|
||||
def fetch(self) -> list[Collection]:
|
||||
if self.region == "harborough":
|
||||
return self.harborough()
|
||||
elif self.region == "westdevon":
|
||||
return self.getcollectiondetails(
|
||||
endpoint="https://westdevon.fccenvironment.co.uk/ajaxprocessor/getcollectiondetails"
|
||||
)
|
||||
elif self.region == "southhams":
|
||||
return self.getcollectiondetails(
|
||||
endpoint="https://waste.southhams.gov.uk/mycollections/getcollectiondetails"
|
||||
)
|
||||
|
||||
@@ -0,0 +1,66 @@
|
||||
import datetime
|
||||
import urllib
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "Landkreis Nordwestmecklenburg"
|
||||
DESCRIPTION = "Source for Landkreis Nordwestmecklenburg"
|
||||
URL = "https://www.geoport-nwm.de"
|
||||
TEST_CASES = {
|
||||
"Rüting": {"district": "Rüting"},
|
||||
"Grevenstein u. ...": {"district": "Grevenstein u. Ausbau"},
|
||||
"Seefeld": {"district": "Seefeld/ Testorf- Steinfort"},
|
||||
"1100l": {"district": "Groß Stieten (1.100 l Behälter)"},
|
||||
"kl. Bünsdorf": {"district": "Klein Bünsdorf"},
|
||||
}
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, district):
|
||||
self._district = district
|
||||
self._ics = ICS()
|
||||
|
||||
def fetch(self):
|
||||
today = datetime.date.today()
|
||||
dates = []
|
||||
if today.month == 12:
|
||||
# On Dec 27 2022, the 2022 schedule was no longer available for test case "Seefeld", all others worked
|
||||
try:
|
||||
dates = self.fetch_year(today.year)
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
dates.extend(self.fetch_year(today.year + 1))
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
dates = self.fetch_year(today.year)
|
||||
|
||||
entries = []
|
||||
for d in dates:
|
||||
entries.append(Collection(d[0], d[1]))
|
||||
return entries
|
||||
|
||||
def fetch_year(self, year):
|
||||
arg = convert_to_arg(self._district)
|
||||
r = requests.get(
|
||||
f"https://www.geoport-nwm.de/nwm-download/Abfuhrtermine/ICS/{year}/{arg}.ics"
|
||||
)
|
||||
r.raise_for_status()
|
||||
return self._ics.convert(r.text)
|
||||
|
||||
|
||||
def convert_to_arg(district):
|
||||
district = district.replace("(1.100 l Behälter)", "1100_l")
|
||||
district = district.replace("ü", "ue")
|
||||
district = district.replace("ö", "oe")
|
||||
district = district.replace("ä", "ae")
|
||||
district = district.replace("ß", "ss")
|
||||
district = district.replace("/", "")
|
||||
district = district.replace("- ", "-")
|
||||
district = district.replace(".", "")
|
||||
district = district.replace(" ", "_")
|
||||
arg = urllib.parse.quote("Ortsteil_" + district)
|
||||
return arg
|
||||
@@ -0,0 +1,78 @@
|
||||
import logging
|
||||
import re
|
||||
from datetime import datetime
|
||||
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "Gold Coast City Council"
|
||||
DESCRIPTION = "Source for Gold Coast Council rubbish collection."
|
||||
URL = "https://www.goldcoast.qld.gov.au"
|
||||
TEST_CASES = {
|
||||
"MovieWorx": { "street_address": "50 Millaroo Dr Helensvale" },
|
||||
"The Henchman": { "street_address": "6/8 Henchman Ave Miami" },
|
||||
"Pie Pie": { "street_address": "1887 Gold Coast Hwy Burleigh Heads" }
|
||||
}
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
ICON_MAP = { # Dict of waste types and suitable mdi icons
|
||||
"General waste": "mdi:trash-can",
|
||||
"Recycling": "mdi:recycle",
|
||||
"Green organics": "mdi:leaf",
|
||||
}
|
||||
|
||||
class Source:
    """Scrapes the Gold Coast City Council "find my bin day" waste service."""

    def __init__(self, street_address):
        # Free-text street address, resolved via the council's fuzzy-search API.
        self._street_address = street_address

    def fetch(self):
        """Resolve the address to a geolocation id, then parse the returned
        waste-services HTML fragment into Collection entries.

        Raises on HTTP errors and when the address search returns no results.
        """
        session = requests.Session()

        # Step 1: fuzzy address search; we only ever use the top hit.
        response = session.get(
            "https://www.goldcoast.qld.gov.au/api/v1/myarea/searchfuzzy?maxresults=1",
            params={"keywords": self._street_address},
        )
        response.raise_for_status()
        addressSearchApiResults = response.json()
        if (
            addressSearchApiResults["Items"] is None
            or len(addressSearchApiResults["Items"]) < 1
        ):
            raise Exception(
                f"Address search for '{self._street_address}' returned no results. Check your address on https://www.goldcoast.qld.gov.au/Services/Waste-recycling/Find-my-bin-day"
            )

        addressSearchTopHit = addressSearchApiResults["Items"][0]
        _LOGGER.debug("Address search top hit: %s", addressSearchTopHit)

        geolocationid = addressSearchTopHit["Id"]
        _LOGGER.debug("Geolocationid: %s", geolocationid)

        # Step 2: fetch the waste services for the resolved location.
        # Fix: URL scheme was "Https" (capital H); it only worked because
        # requests matches adapters case-insensitively. Normalized to lowercase.
        response = session.get(
            "https://www.goldcoast.qld.gov.au/ocapi/Public/myarea/wasteservices?ocsvclang=en-AU",
            params={"geolocationid": geolocationid},
        )
        response.raise_for_status()

        wasteApiResult = response.json()
        _LOGGER.debug("Waste API result: %s", wasteApiResult)

        # The API wraps an HTML fragment; each <article> describes one bin.
        soup = BeautifulSoup(wasteApiResult["responseContent"], "html.parser")

        entries = []
        for article in soup.find_all("article"):
            waste_type = article.h3.string
            icon = ICON_MAP.get(waste_type, "mdi:trash-can")
            next_pickup = article.find(class_="next-service").string.strip()
            # Expected format "<Weekday> DD/MM/YYYY"; skip anything else
            # (e.g. placeholder text for services the property doesn't have).
            if re.match(r"[^\s]* \d{1,2}\/\d{1,2}\/\d{4}", next_pickup):
                next_pickup_date = datetime.strptime(
                    next_pickup.split(sep=" ")[1], "%d/%m/%Y"
                ).date()
                entries.append(
                    Collection(date=next_pickup_date, t=waste_type, icon=icon)
                )

        return entries
|
||||
@@ -4,17 +4,16 @@ from datetime import datetime
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "Grafikai.svara.lt"
|
||||
DESCRIPTION = "Source for UAB \"Kauno švara\"."
|
||||
TITLE = "Kauno švara"
|
||||
DESCRIPTION = 'Source for UAB "Kauno švara".'
|
||||
URL = "http://grafikai.svara.lt"
|
||||
TEST_CASES = {
|
||||
"Demokratų g. 7, Kaunas": {
|
||||
"region": "Kauno m. sav.",
|
||||
"street": "Demokratų g.",
|
||||
"house_number": "7",
|
||||
"waste_object_ids": [101358, 100858, 100860]
|
||||
"waste_object_ids": [101358, 100858, 100860],
|
||||
},
|
||||
|
||||
"Alytaus g. 2, Išlaužo k., Išlaužo sen. Prienų r. sav.": {
|
||||
"region": "Prienų r. sav.",
|
||||
"street": "Alytaus g.",
|
||||
@@ -23,7 +22,7 @@ TEST_CASES = {
|
||||
},
|
||||
}
|
||||
|
||||
ICONS = {
|
||||
ICON_MAP = {
|
||||
"mišrių atliekų": "mdi:trash-can",
|
||||
"antrinių žaliavų (popierius/plastikas)": "mdi:recycle",
|
||||
"antrinių žaliavų (stiklas)": "mdi:glass-fragile",
|
||||
@@ -34,7 +33,9 @@ ICONS = {
|
||||
class Source:
|
||||
API_URL = "http://grafikai.svara.lt/api/"
|
||||
|
||||
def __init__(self, region, street, house_number, district=None, waste_object_ids=None):
|
||||
def __init__(
|
||||
self, region, street, house_number, district=None, waste_object_ids=None
|
||||
):
|
||||
if waste_object_ids is None:
|
||||
waste_object_ids = []
|
||||
self._region = region
|
||||
@@ -68,10 +69,8 @@ class Source:
|
||||
for collection in data["data"]:
|
||||
try:
|
||||
type = collection["descriptionPlural"].casefold()
|
||||
if self.check_if_waste_object_defined(collection['wasteObjectId']):
|
||||
waste_object_query = {
|
||||
"wasteObjectId": collection['wasteObjectId']
|
||||
}
|
||||
if self.check_if_waste_object_defined(collection["wasteObjectId"]):
|
||||
waste_object_query = {"wasteObjectId": collection["wasteObjectId"]}
|
||||
|
||||
rwo = requests.get(
|
||||
self.API_URL + "schedule",
|
||||
@@ -81,14 +80,13 @@ class Source:
|
||||
self.check_for_error_status(data_waste_object)
|
||||
|
||||
for collection_waste_object in data_waste_object:
|
||||
print(collection_waste_object["date"])
|
||||
entries.append(
|
||||
Collection(
|
||||
date=datetime.strptime(
|
||||
collection_waste_object["date"], "%Y-%m-%dT%H:%M:%S"
|
||||
).date(),
|
||||
t=collection["descriptionFmt"].title(),
|
||||
icon=ICONS.get(type, "mdi:trash-can"),
|
||||
icon=ICON_MAP.get(type, "mdi:trash-can"),
|
||||
)
|
||||
)
|
||||
except ValueError:
|
||||
@@ -107,6 +105,6 @@ class Source:
|
||||
if "status" in collection:
|
||||
raise Exception(
|
||||
"Error: failed to fetch get data, got status: {}".format(
|
||||
collection['status']
|
||||
collection["status"]
|
||||
)
|
||||
)
|
||||
|
||||
@@ -4,9 +4,8 @@ from datetime import datetime
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "guildford.gov.uk"
|
||||
TITLE = "Guildford Borough Council"
|
||||
DESCRIPTION = "Source for guildford.gov.uk services for Guildford, UK."
|
||||
# Find the UPRN of your address using https://www.findmyaddress.co.uk/search
|
||||
URL = "https://guildford.gov.uk"
|
||||
TEST_CASES = {
|
||||
"GU12": {"uprn": "10007060305"},
|
||||
@@ -14,7 +13,7 @@ TEST_CASES = {
|
||||
"GU2": {"uprn": "100061391831"},
|
||||
}
|
||||
|
||||
ICONS = {
|
||||
ICON_MAP = {
|
||||
"Refuse": "mdi:trash-can",
|
||||
"Food": "mdi:food-apple",
|
||||
"Recycling": "mdi:recycle",
|
||||
@@ -74,7 +73,7 @@ class Source:
|
||||
collection["NextDate"], "%Y-%m-%dT%H:%M:%S.000Z"
|
||||
).date(),
|
||||
t=collection["FeatureName"],
|
||||
icon=ICONS[collection["FeatureName"]],
|
||||
icon=ICON_MAP[collection["FeatureName"]],
|
||||
)
|
||||
)
|
||||
except ValueError:
|
||||
|
||||
@@ -4,7 +4,7 @@ from datetime import datetime
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "Huntingdonshire.gov.uk"
|
||||
TITLE = "Huntingdonshire District Council"
|
||||
DESCRIPTION = "Source for Huntingdonshire.gov.uk services for Huntingdonshire District Council."
|
||||
URL = "https://www.huntingdonshire.gov.uk"
|
||||
TEST_CASES = {
|
||||
@@ -12,7 +12,7 @@ TEST_CASES = {
|
||||
"Inkerman Rise, St. Neots": {"uprn": "10000144271"},
|
||||
}
|
||||
|
||||
ICONS = {
|
||||
ICON_MAP = {
|
||||
"Refuse": "mdi:trash-can",
|
||||
"Recycling": "mdi:recycle",
|
||||
"Garden": "mdi:leaf",
|
||||
@@ -45,7 +45,7 @@ class Source:
|
||||
collection["date"], "%Y-%m-%dT%H:%M:%SZ"
|
||||
).date(),
|
||||
t=round_type.title(),
|
||||
icon=ICONS.get(round_type),
|
||||
icon=ICON_MAP.get(round_type),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -1,56 +1,184 @@
|
||||
import json
|
||||
import logging
|
||||
from datetime import datetime
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "HVCGroep"
|
||||
TITLE = None
|
||||
DESCRIPTION = "Source for the Dutch HVCGroep waste management."
|
||||
URL = "https://www.hvcgroep.nl/zelf-regelen/afvalkalender"
|
||||
TEST_CASES = {"Tollebeek": {"postal_code": "8309AV", "house_number": "1"}}
|
||||
URL = "https://www.hvcgroep.nl"
|
||||
|
||||
|
||||
def EXTRA_INFO():
|
||||
return [
|
||||
{"title": s["title"], "url": get_main_url(s["api_url"])} for s in SERVICE_MAP
|
||||
]
|
||||
|
||||
|
||||
TEST_CASES = {
|
||||
"Tollebeek": {"postal_code": "8309AV", "house_number": "1"},
|
||||
"Hvgroep: Tollebeek": {
|
||||
"postal_code": "8309AV",
|
||||
"house_number": "1",
|
||||
"service": "hvcgroep",
|
||||
},
|
||||
"Cyclus": {"postal_code": "2841ML", "house_number": "1090", "service": "cyclusnv"},
|
||||
"Mijnblink": {
|
||||
"postal_code": "5741BV",
|
||||
"house_number": "76",
|
||||
"service": "mijnblink",
|
||||
},
|
||||
}
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
SERVICE_MAP = [
|
||||
{
|
||||
"title": "Alpen an den Rijn",
|
||||
"api_url": "https://afvalkalender.alphenaandenrijn.nl",
|
||||
},
|
||||
{
|
||||
"title": "Gemeente Cranendonck",
|
||||
"api_url": "https://afvalkalender.cranendonck.nl",
|
||||
},
|
||||
{
|
||||
"title": "Cyclus NV",
|
||||
"api_url": "https://afvalkalender.cyclusnv.nl",
|
||||
},
|
||||
{
|
||||
"title": "Dar",
|
||||
"api_url": "https://afvalkalender.dar.nl",
|
||||
},
|
||||
{
|
||||
"title": "Den Haag",
|
||||
"api_url": "https://huisvuilkalender.denhaag.nl",
|
||||
},
|
||||
{
|
||||
"title": "GAD",
|
||||
"api_url": "https://inzamelkalender.gad.nl",
|
||||
},
|
||||
{
|
||||
"title": "Gemeente Berkelland",
|
||||
"api_url": "https://afvalkalender.gemeenteberkelland.nl",
|
||||
},
|
||||
{
|
||||
"title": "HVC Groep",
|
||||
"api_url": "https://inzamelkalender.hvcgroep.nl",
|
||||
},
|
||||
{
|
||||
"title": "Gemeente Lingewaard",
|
||||
"api_url": "https://afvalwijzer.lingewaard.nl",
|
||||
},
|
||||
{
|
||||
"title": "Gemeente Middelburg + Vlissingen",
|
||||
"api_url": "https://afvalwijzer.middelburgvlissingen.nl",
|
||||
},
|
||||
{
|
||||
"title": "Mijn Blink",
|
||||
"api_url": "https://mijnblink.nl",
|
||||
},
|
||||
{
|
||||
"title": "Gemeente Peel en Maas",
|
||||
"api_url": "https://afvalkalender.peelenmaas.nl",
|
||||
},
|
||||
{
|
||||
"title": "PreZero",
|
||||
"api_url": "https://inzamelwijzer.prezero.nl",
|
||||
},
|
||||
{
|
||||
"title": "Purmerend",
|
||||
"api_url": "https://afvalkalender.purmerend.nl",
|
||||
},
|
||||
{
|
||||
"title": "Reinigingsbedrijf Midden Nederland",
|
||||
"api_url": "https://inzamelschema.rmn.nl",
|
||||
},
|
||||
{
|
||||
"title": "Gemeente Schouwen-Duiveland",
|
||||
"api_url": "https://afvalkalender.schouwen-duiveland.nl",
|
||||
},
|
||||
{
|
||||
"title": "Spaarne Landen",
|
||||
"api_url": "https://afvalwijzer.spaarnelanden.nl",
|
||||
},
|
||||
{
|
||||
"title": "Stadswerk 072",
|
||||
"api_url": "https://www.stadswerk072.nl",
|
||||
},
|
||||
{
|
||||
"title": "Gemeente Sudwest-Fryslan",
|
||||
"api_url": "https://afvalkalender.sudwestfryslan.nl",
|
||||
},
|
||||
{
|
||||
"title": "Gemeente Venray",
|
||||
"api_url": "https://afvalkalender.venray.nl",
|
||||
},
|
||||
{
|
||||
"title": "Gemeente Voorschoten",
|
||||
"api_url": "https://afvalkalender.voorschoten.nl",
|
||||
},
|
||||
{
|
||||
"title": "Gemeente Wallre",
|
||||
"api_url": "https://afvalkalender.waalre.nl",
|
||||
},
|
||||
{
|
||||
"title": "ZRD",
|
||||
"api_url": "https://afvalkalender.zrd.nl",
|
||||
},
|
||||
]
|
||||
|
||||
class Source:
|
||||
def __init__(self, postal_code, house_number):
|
||||
self.postal_code = postal_code
|
||||
self.house_number = house_number
|
||||
self.icons = {
|
||||
|
||||
def get_service_name_map():
    """Map each provider's short service name to its API base URL."""

    def _short_name(api_url):
        # Second-to-last dot-separated label, e.g. "cyclusnv" from
        # "https://afvalkalender.cyclusnv.nl"; the trailing rsplit strips any
        # scheme/host remnant left when the URL has no subdomain.
        label = api_url.split(".")[-2]
        return label.rsplit("/", 1)[-1]

    return {_short_name(entry["api_url"]): entry["api_url"] for entry in SERVICE_MAP}
|
||||
|
||||
|
||||
def get_main_url(url):
    """Strip any subdomain from *url*, returning "https://<domain>.<tld>"."""
    tail = url.split(".")[-2:]  # keep only the last two dot-separated labels
    # When the URL has no subdomain the first label still carries the scheme.
    tail[0] = tail[0].removeprefix("https://")
    return "https://" + ".".join(tail)
|
||||
|
||||
|
||||
ICON_MAP = {
|
||||
"plastic-blik-drinkpak": "mdi:recycle",
|
||||
"gft": "mdi:leaf",
|
||||
"papier-en-karton": "mdi:archive",
|
||||
"restafval": "mdi:trash-can",
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, postal_code, house_number, service="hvcgroep"):
|
||||
self.postal_code = postal_code
|
||||
self.house_number = house_number
|
||||
self._url = get_service_name_map()[service]
|
||||
|
||||
def fetch(self):
|
||||
bag_id = 0
|
||||
|
||||
# Retrieve bagid (unique waste management identifier)
|
||||
r = requests.get(
|
||||
f"https://inzamelkalender.hvcgroep.nl/adressen/{self.postal_code}:{self.house_number}"
|
||||
)
|
||||
data = json.loads(r.text)
|
||||
r = requests.get(f"{self._url}/adressen/{self.postal_code}:{self.house_number}")
|
||||
r.raise_for_status()
|
||||
data = r.json()
|
||||
|
||||
# Something must be wrong, maybe the address isn't valid? No need to do the extra requests so just return here.
|
||||
if len(data) == 0:
|
||||
_LOGGER.error("no data found for this address")
|
||||
return []
|
||||
raise Exception("no data found for this address")
|
||||
|
||||
bag_id = data[0]["bagid"]
|
||||
|
||||
# Retrieve the details about different waste management flows (for example, paper, plastic etc.)
|
||||
r = requests.get(
|
||||
f"https://inzamelkalender.hvcgroep.nl/rest/adressen/{bag_id}/afvalstromen"
|
||||
)
|
||||
waste_flows = json.loads(r.text)
|
||||
r = requests.get(f"{self._url}/rest/adressen/{bag_id}/afvalstromen")
|
||||
r.raise_for_status()
|
||||
waste_flows = r.json()
|
||||
|
||||
# Retrieve the coming pickup dates for waste.
|
||||
r = requests.get(
|
||||
f"https://inzamelkalender.hvcgroep.nl/rest/adressen/{bag_id}/ophaaldata"
|
||||
)
|
||||
data = json.loads(r.text)
|
||||
r = requests.get(f"{self._url}/rest/adressen/{bag_id}/ophaaldata")
|
||||
r.raise_for_status()
|
||||
data = r.json()
|
||||
|
||||
entries = []
|
||||
|
||||
@@ -62,7 +190,7 @@ class Source:
|
||||
Collection(
|
||||
date=datetime.strptime(item["ophaaldatum"], "%Y-%m-%d").date(),
|
||||
t=waste_details[0]["title"],
|
||||
icon=self.icons.get(waste_details[0]["icon"], "mdi:trash-can"),
|
||||
icon=ICON_MAP.get(waste_details[0]["icon"], "mdi:trash-can"),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -39,9 +39,7 @@ class Source:
|
||||
response = requests.get(
|
||||
"https://www.hygea.be/displaycalws.html", params=params
|
||||
)
|
||||
|
||||
if not response.ok:
|
||||
return []
|
||||
response.raise_for_status()
|
||||
data = json.loads(response.text)
|
||||
|
||||
entries = []
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import datetime
|
||||
import logging
|
||||
from os import getcwd
|
||||
from pathlib import Path
|
||||
|
||||
import requests
|
||||
@@ -94,7 +95,7 @@ TEST_CASES = {
|
||||
"year_field": "year",
|
||||
},
|
||||
"EAW Rheingau Taunus": {
|
||||
"url": "https://www.eaw-rheingau-taunus.de/abfallkalender/calendar.ics?streetid=1429",
|
||||
"url": "https://www.eaw-rheingau-taunus.de/abfallsammlung/abfuhrtermine/feed.ics?tx_vierwdeaw_garbagecalendarics%5Baction%5D=ics&tx_vierwdeaw_garbagecalendarics%5Bcontroller%5D=GarbageCalendar&tx_vierwdeaw_garbagecalendarics%5Bstreet%5D=38",
|
||||
"split_at": ",",
|
||||
},
|
||||
"Recollect, Ottawa": {
|
||||
@@ -196,20 +197,17 @@ class Source:
|
||||
raise RuntimeError(
|
||||
"Error: unknown method to fetch URL, use GET or POST; got {self._method}"
|
||||
)
|
||||
r.raise_for_status()
|
||||
|
||||
r.encoding = "utf-8" # requests doesn't guess the encoding correctly
|
||||
|
||||
# check the return code
|
||||
if not r.ok:
|
||||
_LOGGER.error(
|
||||
"Error: the response is not ok; need code 200, but got code %s"
|
||||
% r.status_code
|
||||
)
|
||||
return []
|
||||
|
||||
return self._convert(r.text)
|
||||
|
||||
def fetch_file(self, file):
|
||||
try:
|
||||
f = open(file)
|
||||
except FileNotFoundError as e:
|
||||
_LOGGER.error(f"Working directory: '{getcwd()}'")
|
||||
raise
|
||||
return self._convert(f.read())
|
||||
|
||||
def _convert(self, data):
|
||||
|
||||
@@ -1,14 +1,21 @@
|
||||
import json
|
||||
import logging
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
TITLE = "INFEO"
|
||||
TITLE = "infeo"
|
||||
DESCRIPTION = "Source for INFEO waste collection."
|
||||
URL = "https://www.infeo.at/"
|
||||
EXTRA_INFO = [
|
||||
{
|
||||
"title": "Bogenschütz Entsorgung",
|
||||
"url": "https://bogenschuetz-entsorgung.de",
|
||||
"country": "de",
|
||||
},
|
||||
]
|
||||
TEST_CASES = {"Bogenschütz": {"customer": "bogenschütz", "zone": "Dettenhausen"}}
|
||||
|
||||
|
||||
@@ -20,7 +27,9 @@ class Source:
|
||||
|
||||
def fetch(self):
|
||||
baseUrl = f"https://services.infeo.at/awm/api/{self._customer}/wastecalendar"
|
||||
issueUrl = "https://github.com/mampfes/hacs_waste_collection_schedule/issues/new"
|
||||
issueUrl = (
|
||||
"https://github.com/mampfes/hacs_waste_collection_schedule/issues/new"
|
||||
)
|
||||
|
||||
params = {
|
||||
"showUnpublishedCalendars": "false",
|
||||
@@ -29,16 +38,14 @@ class Source:
|
||||
# get the available published calendar years
|
||||
url = f"{baseUrl}/calendars"
|
||||
response = requests.get(url, params=params)
|
||||
response.raise_for_status()
|
||||
|
||||
# data validation
|
||||
if(response.status_code != 200):
|
||||
_LOGGER.error(f"problems during api calendar year access, please file an issue at {issueUrl} and mention @dm82m and add this: {response.text}")
|
||||
return []
|
||||
|
||||
response = response.json()
|
||||
if len(response) <= 0:
|
||||
_LOGGER.error(f"no calendars found, please file an issue at {issueUrl} and mention @dm82m")
|
||||
return []
|
||||
raise Exception(
|
||||
f"no calendars found, please file an issue at {issueUrl} and mention @dm82m"
|
||||
)
|
||||
|
||||
entries = []
|
||||
|
||||
@@ -54,15 +61,14 @@ class Source:
|
||||
# get available zones for calendar year
|
||||
url = f"{baseUrl}/zones"
|
||||
response = requests.get(url, params=params)
|
||||
response.raise_for_status()
|
||||
|
||||
# data validation
|
||||
if(response.status_code != 200):
|
||||
_LOGGER.error(f"problems during api zones for calendar year access, please file an issue at {issueUrl} and mention @dm82m and add this: {response.text}")
|
||||
return []
|
||||
|
||||
response = response.json()
|
||||
if len(response) <= 0:
|
||||
_LOGGER.warning(f"no zones found for calendar year {calendarYearName}, continuing with next calendar year ...")
|
||||
_LOGGER.warning(
|
||||
f"no zones found for calendar year {calendarYearName}, continuing with next calendar year ..."
|
||||
)
|
||||
continue
|
||||
|
||||
zoneId = 0
|
||||
@@ -73,7 +79,9 @@ class Source:
|
||||
zoneId = zone["id"]
|
||||
|
||||
if zoneId == 0:
|
||||
_LOGGER.warning(f"zone '{self._zone}' not found in calendar year {calendarYearName}, continuing with next calendar year ...")
|
||||
_LOGGER.warning(
|
||||
f"zone '{self._zone}' not found in calendar year {calendarYearName}, continuing with next calendar year ..."
|
||||
)
|
||||
continue
|
||||
|
||||
params = {
|
||||
@@ -85,11 +93,7 @@ class Source:
|
||||
# get ical data for year and zone
|
||||
url = f"{baseUrl}/v2/export"
|
||||
response = requests.get(url, params=params)
|
||||
|
||||
# data validation
|
||||
if(response.status_code != 200):
|
||||
_LOGGER.error(f"problems during api ical data for zone in calendar year, please file an issue at {issueUrl} and mention @dm82m and add this: {response.text}")
|
||||
return []
|
||||
response.raise_for_status()
|
||||
|
||||
dates = self._ics.convert(response.text)
|
||||
|
||||
@@ -98,6 +102,8 @@ class Source:
|
||||
|
||||
# validate that we processed some data and show an error if not
|
||||
if len(entries) <= 0:
|
||||
_LOGGER.error(f"we were not able to get any waste entries for you! please file an issue at {issueUrl} and mention @dm82m and add this zone: '{self._zone}'")
|
||||
_LOGGER.warning(
|
||||
f"we were not able to get any waste entries for you! please file an issue at {issueUrl} and mention @dm82m and add this zone: '{self._zone}'"
|
||||
)
|
||||
|
||||
return entries
|
||||
|
||||
@@ -6,7 +6,7 @@ from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "Inner West Council (NSW)"
|
||||
DESCRIPTION = "Source for Inner West Council (NSW) rubbish collection."
|
||||
URL = "https://www.innerwest.nsw.gov.au/live/waste-and-recycling/bins-and-clean-ups/waste-calendar"
|
||||
URL = "https://www.innerwest.nsw.gov.au"
|
||||
TEST_CASES = {
|
||||
"Random address": {
|
||||
"suburb": "Tempe",
|
||||
|
||||
@@ -7,14 +7,14 @@ from waste_collection_schedule import Collection
|
||||
|
||||
TITLE = "Ipswich City Council"
|
||||
DESCRIPTION = "Source for Ipswich City Council rubbish collection."
|
||||
URL = "https://www.ipswich.qld.gov.au/live/waste-and-recycling/bin-collection-calendar"
|
||||
URL = "https://www.ipswich.qld.gov.au"
|
||||
TEST_CASES = {
|
||||
"Camira State School": {"street": "184-202 Old Logan Rd", "suburb": "Camira"},
|
||||
"Random": {"street": "50 Brisbane Road", "suburb": "Redbank"},
|
||||
}
|
||||
|
||||
|
||||
ICONS = {
|
||||
ICON_MAP = {
|
||||
"Waste Bin": "mdi:trash-can",
|
||||
"Recycle Bin": "mdi:recycle",
|
||||
"FOGO Bin": "mdi:leaf",
|
||||
@@ -91,7 +91,7 @@ class IpswichGovAuParser(HTMLParser):
|
||||
|
||||
self._entries.append(
|
||||
Collection(
|
||||
self._loaded_date, data, icon=ICONS.get(data, "mdi:trash-can")
|
||||
self._loaded_date, data, icon=ICON_MAP.get(data, "mdi:trash-can")
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -6,9 +6,9 @@ from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "KAEV Niederlausitz"
|
||||
DESCRIPTION = "Source for Kommunaler Abfallverband niederlausitz waste collection."
|
||||
DESCRIPTION = "Source for Kommunaler Abfallverband Niederlausitz waste collection."
|
||||
URL = "https://www.kaev.de/"
|
||||
URL_ADDRESS = 'https://www.kaev.de/Templates/Content/DetailTourenplanWebsite/ajax.aspx/getAddress'
|
||||
COUNTRY = "de"
|
||||
TEST_CASES = {
|
||||
"Luckau / OT Zieckau": {
|
||||
"abf_suche": "Luckau / OT Zieckau",
|
||||
@@ -21,11 +21,13 @@ TEST_CASES = {
|
||||
},
|
||||
}
|
||||
|
||||
API_URL = 'https://www.kaev.de/Templates/Content/DetailTourenplanWebsite/ajax.aspx/getAddress'
|
||||
|
||||
def get_kalender_id(search):
|
||||
s=requests.Session()
|
||||
s.get('https://www.kaev.de/')
|
||||
payload={"query": search}
|
||||
resp = s.post(URL_ADDRESS, json=payload).json()
|
||||
resp = s.post(API_URL, json=payload).json()
|
||||
abf_cal = json.loads(resp["d"])
|
||||
return abf_cal
|
||||
|
||||
|
||||
@@ -1,90 +1,123 @@
|
||||
import logging
|
||||
import requests
|
||||
import time
|
||||
|
||||
from datetime import datetime
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection
|
||||
|
||||
TITLE = 'www.kingston.gov.uk'
|
||||
TITLE = "The Royal Borough of Kingston Council"
|
||||
DESCRIPTION = (
|
||||
'Source for waste collection services for The Royal Borough of Kingston Council'
|
||||
"Source for waste collection services for The Royal Borough of Kingston Council"
|
||||
)
|
||||
URL = 'https://kingston-self.achieveservice.com/service/in_my_area?displaymode=collections'
|
||||
URL = "kingston.gov.uk"
|
||||
|
||||
|
||||
HEADERS = {
|
||||
"user-agent": "Mozilla/5.0",
|
||||
}
|
||||
|
||||
COOKIES = {
|
||||
|
||||
}
|
||||
COOKIES = {}
|
||||
|
||||
TEST_CASES = {
|
||||
"Blagdon Road - number" : {"uprn": 100021772910},
|
||||
"Blagdon Road - string" : {"uprn": "100021772910"},
|
||||
"Blagdon Road - number": {"uprn": 100021772910},
|
||||
"Blagdon Road - string": {"uprn": "100021772910"},
|
||||
}
|
||||
|
||||
API_URLS = {
|
||||
'session': 'https://kingston-self.achieveservice.com/service/In_my_Area_Results?uprn=100021772910&displaymode=collections&altVal=',
|
||||
'auth': 'https://kingston-self.achieveservice.com/authapi/isauthenticated?uri=https%253A%252F%252Fkingston-self.achieveservice.com%252Fservice%252FIn_my_Area_Results%253Fuprn%253D100021772910%2526displaymode%253Dcollections%2526altVal%253D&hostname=kingston-self.achieveservice.com&withCredentials=true',
|
||||
'schedule': 'https://kingston-self.achieveservice.com/apibroker/runLookup?id=601a61f9a3188&repeat_against=&noRetry=true&getOnlyTokens=undefined&log_id=&app_name=AF-Renderer::Self&'
|
||||
"session": "https://kingston-self.achieveservice.com/service/In_my_Area_Results?uprn=100021772910&displaymode=collections&altVal=",
|
||||
"auth": "https://kingston-self.achieveservice.com/authapi/isauthenticated?uri=https%253A%252F%252Fkingston-self.achieveservice.com%252Fservice%252FIn_my_Area_Results%253Fuprn%253D100021772910%2526displaymode%253Dcollections%2526altVal%253D&hostname=kingston-self.achieveservice.com&withCredentials=true",
|
||||
"schedule": "https://kingston-self.achieveservice.com/apibroker/runLookup?id=601a61f9a3188&repeat_against=&noRetry=true&getOnlyTokens=undefined&log_id=&app_name=AF-Renderer::Self&",
|
||||
}
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, uprn: str):
|
||||
self._uprn = str(uprn)
|
||||
|
||||
def fetch(self):
|
||||
s = requests.Session()
|
||||
|
||||
#This request sets up the cookies
|
||||
r0 = s.get(API_URLS['session'], headers=HEADERS)
|
||||
# This request sets up the cookies
|
||||
r0 = s.get(API_URLS["session"], headers=HEADERS)
|
||||
r0.raise_for_status()
|
||||
|
||||
#This request gets the session key from the PHPSESSID (in the cookies)
|
||||
authRequest = s.get(API_URLS['auth'], headers=HEADERS)
|
||||
# This request gets the session key from the PHPSESSID (in the cookies)
|
||||
authRequest = s.get(API_URLS["auth"], headers=HEADERS)
|
||||
authData = authRequest.json()
|
||||
sessionKey = authData['auth-session']
|
||||
sessionKey = authData["auth-session"]
|
||||
now = time.time_ns() // 1_000_000
|
||||
|
||||
#now query using the uprn
|
||||
payload = { "formValues": { "Section 1": { "UPRN_FromUrl": { "value": self._uprn }, "borough_code": { "value": "RBK" }, "show_wasteCollection": { "value": "1" }, "echo_borough": { "value": "RBK" }, "echo_uprn": { "value": self._uprn } } } }
|
||||
# now query using the uprn
|
||||
payload = {
|
||||
"formValues": {
|
||||
"Section 1": {
|
||||
"UPRN_FromUrl": {"value": self._uprn},
|
||||
"borough_code": {"value": "RBK"},
|
||||
"show_wasteCollection": {"value": "1"},
|
||||
"echo_borough": {"value": "RBK"},
|
||||
"echo_uprn": {"value": self._uprn},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
scheduleRequest = s.post(API_URLS['schedule'] + '&_' + str(now) + '&sid=' + sessionKey , headers=HEADERS, json=payload)
|
||||
data = scheduleRequest.json()['integration']['transformed']['rows_data']['0']
|
||||
print(data)
|
||||
scheduleRequest = s.post(
|
||||
API_URLS["schedule"] + "&_" + str(now) + "&sid=" + sessionKey,
|
||||
headers=HEADERS,
|
||||
json=payload,
|
||||
)
|
||||
data = scheduleRequest.json()["integration"]["transformed"]["rows_data"]["0"]
|
||||
entries = []
|
||||
|
||||
entries.append(Collection(
|
||||
date = datetime.strptime(data['echo_refuse_next_date'], '%Y-%m-%d %H:%M:%S').date(),
|
||||
t = 'refuse bin',
|
||||
icon = 'mdi:trash-can'
|
||||
))
|
||||
entries.append(
|
||||
Collection(
|
||||
date=datetime.strptime(
|
||||
data["echo_refuse_next_date"], "%Y-%m-%d %H:%M:%S"
|
||||
).date(),
|
||||
t="refuse bin",
|
||||
icon="mdi:trash-can",
|
||||
)
|
||||
)
|
||||
|
||||
entries.append(Collection(
|
||||
date = datetime.strptime(data['echo_food_waste_next_date'], '%Y-%m-%d %H:%M:%S').date(),
|
||||
t = 'food waste bin',
|
||||
icon = 'mdi:trash-can'
|
||||
))
|
||||
entries.append(
|
||||
Collection(
|
||||
date=datetime.strptime(
|
||||
data["echo_food_waste_next_date"], "%Y-%m-%d %H:%M:%S"
|
||||
).date(),
|
||||
t="food waste bin",
|
||||
icon="mdi:trash-can",
|
||||
)
|
||||
)
|
||||
|
||||
entries.append(Collection(
|
||||
date = datetime.strptime(data['echo_paper_and_card_next_date'], '%Y-%m-%d %H:%M:%S').date(),
|
||||
t = 'paper and card recycling bin',
|
||||
icon = 'mdi:recycle'
|
||||
))
|
||||
entries.append(
|
||||
Collection(
|
||||
date=datetime.strptime(
|
||||
data["echo_paper_and_card_next_date"], "%Y-%m-%d %H:%M:%S"
|
||||
).date(),
|
||||
t="paper and card recycling bin",
|
||||
icon="mdi:recycle",
|
||||
)
|
||||
)
|
||||
|
||||
entries.append(Collection(
|
||||
date = datetime.strptime(data['echo_mixed_recycling_next_date'], '%Y-%m-%d %H:%M:%S').date(),
|
||||
t = 'mixed recycling bin',
|
||||
icon = 'mdi:recycle'
|
||||
))
|
||||
entries.append(
|
||||
Collection(
|
||||
date=datetime.strptime(
|
||||
data["echo_mixed_recycling_next_date"], "%Y-%m-%d %H:%M:%S"
|
||||
).date(),
|
||||
t="mixed recycling bin",
|
||||
icon="mdi:recycle",
|
||||
)
|
||||
)
|
||||
|
||||
entries.append(Collection(
|
||||
date = datetime.strptime(data['echo_garden_waste_next_date'], '%Y-%m-%d %H:%M:%S').date(),
|
||||
t = 'garden waste bin',
|
||||
icon = 'mdi:leaf'
|
||||
))
|
||||
entries.append(
|
||||
Collection(
|
||||
date=datetime.strptime(
|
||||
data["echo_garden_waste_next_date"], "%Y-%m-%d %H:%M:%S"
|
||||
).date(),
|
||||
t="garden waste bin",
|
||||
icon="mdi:leaf",
|
||||
)
|
||||
)
|
||||
|
||||
return entries
|
||||
|
||||
@@ -6,22 +6,25 @@ from bs4 import BeautifulSoup
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS # type: ignore[attr-defined]
|
||||
|
||||
TITLE = 'Müllabfuhr Korneuburg'
|
||||
DESCRIPTION = 'Source for Stadtservice Korneuburg'
|
||||
URL = 'https://www.korneuburg.gv.at'
|
||||
TITLE = "Stadtservice Korneuburg"
|
||||
DESCRIPTION = "Source for Stadtservice Korneuburg"
|
||||
URL = "https://www.korneuburg.gv.at"
|
||||
TEST_CASES = {
|
||||
"Rathaus": {"street_name": "Hauptplatz", "street_number": 39}, # Teilgebiet 4
|
||||
"Rathaus using Teilgebiet": {
|
||||
"street_name": "SomeStreet",
|
||||
"street_number": "1A",
|
||||
"teilgebiet": "4",
|
||||
}, # Teilgebiet 4
|
||||
"Werft": {"street_name": "Am Hafen", "street_number": 6}, # Teilgebiet 2
|
||||
}
|
||||
|
||||
# Mapping of teilgebiete to calendar urls
|
||||
WASTE_TYPE_URLS = {
|
||||
'1': ('Biomuell_3', 'Restmuell_3', 'Papier_2', 'Gelber_Sack_4'),
|
||||
'2': ('Biomuell_4', 'Restmuell_2', 'Papier_3', 'Gelber_Sack_1'),
|
||||
'3': ('Biomuell_1', 'Restmuell_1', 'Papier_1', 'Gelber_Sack_2'),
|
||||
'4': ('Biomuell_2', 'Restmuell', 'Papier', 'Gelber_Sack_3')
|
||||
}
|
||||
|
||||
TEST_CASES = {
|
||||
"Rathaus": {"street_name": "Hauptplatz", "street_number": 39}, # Teilgebiet 4
|
||||
"Rathaus using Teilgebiet": {"street_name": "SomeStreet", "street_number": "1A", "teilgebiet": "4"}, # Teilgebiet 4
|
||||
"Werft": {"street_name": "Am Hafen", "street_number": 6} # Teilgebiet 2
|
||||
"1": ("Biomuell_3", "Restmuell_3", "Papier_2", "Gelber_Sack_4"),
|
||||
"2": ("Biomuell_4", "Restmuell_2", "Papier_3", "Gelber_Sack_1"),
|
||||
"3": ("Biomuell_1", "Restmuell_1", "Papier_1", "Gelber_Sack_2"),
|
||||
"4": ("Biomuell_2", "Restmuell", "Papier", "Gelber_Sack_3"),
|
||||
}
|
||||
|
||||
|
||||
@@ -29,18 +32,15 @@ class Source:
|
||||
def __init__(self, street_name, street_number, teilgebiet=-1):
|
||||
self.street_name = street_name
|
||||
self.street_number = street_number
|
||||
self.teilgebiet = teilgebiet
|
||||
|
||||
self._region = None
|
||||
self._street_name_id = -1
|
||||
self._street_number_id = -1
|
||||
self._headers = {'User-Agent': 'Mozilla/5.0'}
|
||||
self._cookies = {'ris_cookie_setting': 'g7750'} # Accept Cookie Consent
|
||||
self._headers = {"User-Agent": "Mozilla/5.0"}
|
||||
self._cookies = {"ris_cookie_setting": "g7750"} # Accept Cookie Consent
|
||||
self._ics = ICS()
|
||||
|
||||
if 0 < int(teilgebiet) <= 4:
|
||||
self.region = str(teilgebiet)
|
||||
else:
|
||||
self.region = self.determine_region()
|
||||
|
||||
@staticmethod
|
||||
def extract_street_numbers(soup):
|
||||
|
||||
@@ -53,19 +53,31 @@ class Source:
|
||||
street_number_idx += 1
|
||||
|
||||
possible_numbers = json.loads(
|
||||
scripts[street_number_idx].string[19:].replace('\r\n', '').replace(', ]', ']').replace('\'', '"'))
|
||||
scripts[street_number_idx]
|
||||
.string[19:]
|
||||
.replace("\r\n", "")
|
||||
.replace(", ]", "]")
|
||||
.replace("'", '"')
|
||||
)
|
||||
|
||||
number_dict = dict()
|
||||
|
||||
for idx, street_id in enumerate(possible_numbers):
|
||||
number_dict[street_id[0]] = {e[1]: (e[0], e[2]) for _idx, e in enumerate(possible_numbers[idx][1])}
|
||||
number_dict[street_id[0]] = {
|
||||
e[1]: (e[0], e[2]) for _idx, e in enumerate(possible_numbers[idx][1])
|
||||
}
|
||||
|
||||
return number_dict
|
||||
|
||||
@staticmethod
|
||||
def extract_street_names(soup):
|
||||
street_selector = soup.find("select", {"id": "225991280_boxmuellkalenderstrassedd"}).findAll("option")
|
||||
available_streets = {street.string: int(street["value"]) for _idx, street in enumerate(street_selector)}
|
||||
street_selector = soup.find(
|
||||
"select", {"id": "225991280_boxmuellkalenderstrassedd"}
|
||||
).findAll("option")
|
||||
available_streets = {
|
||||
street.string: int(street["value"])
|
||||
for _idx, street in enumerate(street_selector)
|
||||
}
|
||||
|
||||
return available_streets
|
||||
|
||||
@@ -74,8 +86,8 @@ class Source:
|
||||
region = -1
|
||||
|
||||
for span in soup.findAll("span"):
|
||||
if span.parent.name == 'td' and "teilgebiet" in span.string.lower():
|
||||
region = span.string.split(' ')[1]
|
||||
if span.parent.name == "td" and "teilgebiet" in span.string.lower():
|
||||
region = span.string.split(" ")[1]
|
||||
break
|
||||
|
||||
return region
|
||||
@@ -83,6 +95,9 @@ class Source:
|
||||
def determine_region(self):
|
||||
"""finds the target region for the street and street number"""
|
||||
|
||||
if 0 < int(self.teilgebiet) <= 4:
|
||||
return str(self.teilgebiet)
|
||||
|
||||
# request address selection form
|
||||
url = urljoin(URL, "Rathaus/Buergerservice/Muellabfuhr")
|
||||
page = requests.get(url=url, headers=self._headers, cookies=self._cookies)
|
||||
@@ -95,44 +110,68 @@ class Source:
|
||||
street_found = self.street_name in available_streets.keys()
|
||||
|
||||
if not street_found:
|
||||
raise Exception(f"{self.street_name} not found. Please check back spelling with the official site: {url}")
|
||||
raise Exception(
|
||||
f"{self.street_name} not found. Please check back spelling with the official site: {url}"
|
||||
)
|
||||
|
||||
self._street_name_id = available_streets.get(self.street_name)
|
||||
|
||||
self._street_number_id, street_number_link = number_dict.get(
|
||||
available_streets.get(self.street_name)).get(str(self.street_number), (-1, 'not found'))
|
||||
available_streets.get(self.street_name)
|
||||
).get(str(self.street_number), (-1, "not found"))
|
||||
|
||||
if street_number_link == 'not found':
|
||||
raise Exception(f"{self.street_number} not found. Available numbers for {self.street_name} are\
|
||||
{list(number_dict.get(available_streets['Am Hafen']).keys())}")
|
||||
if street_number_link == "not found":
|
||||
raise Exception(
|
||||
f"{self.street_number} not found. Available numbers for {self.street_name} are\
|
||||
{list(number_dict.get(available_streets['Am Hafen']).keys())}"
|
||||
)
|
||||
|
||||
# add selection cookie
|
||||
self._cookies['riscms_muellkalender'] = str(f"{self._street_name_id}_{self._street_number_id}")
|
||||
self._cookies["riscms_muellkalender"] = str(
|
||||
f"{self._street_name_id}_{self._street_number_id}"
|
||||
)
|
||||
|
||||
# request overview with address selection to get the region
|
||||
url = urljoin(URL, "system/web/kalender.aspx")
|
||||
page = requests.get(url=url, headers=self._headers, cookies=self._cookies,
|
||||
params={"sprache": "1", "menuonr": "225991280", "typids": street_number_link})
|
||||
page = requests.get(
|
||||
url=url,
|
||||
headers=self._headers,
|
||||
cookies=self._cookies,
|
||||
params={
|
||||
"sprache": "1",
|
||||
"menuonr": "225991280",
|
||||
"typids": street_number_link,
|
||||
},
|
||||
)
|
||||
soup = BeautifulSoup(page.content, "html.parser")
|
||||
|
||||
region = self.extract_region(soup)
|
||||
|
||||
if region == -1:
|
||||
raise Exception(f"Region could not be found")
|
||||
raise Exception("Region could not be found")
|
||||
|
||||
return str(region)
|
||||
|
||||
def get_region_links(self):
|
||||
"""traverses the pages for different waste types and collects download links for the iCals"""
|
||||
|
||||
if self._region is None:
|
||||
self._region = self.determine_region()
|
||||
|
||||
# create waste type urls
|
||||
ical_urls = []
|
||||
urls = [urljoin(URL, u) for u in WASTE_TYPE_URLS.get(self.region)]
|
||||
urls = [urljoin(URL, u) for u in WASTE_TYPE_URLS.get(self._region)]
|
||||
|
||||
for u in urls:
|
||||
r = requests.get(url=u, headers=self._headers, cookies=self._cookies)
|
||||
soup = BeautifulSoup(r.content, "html.parser")
|
||||
download_link = soup.findAll("a", {"class": "piwik_download_tracker", "data-trackingtyp": "iCal/Kalender"})
|
||||
download_link = soup.findAll(
|
||||
"a",
|
||||
{
|
||||
"class": "piwik_download_tracker",
|
||||
"data-trackingtyp": "iCal/Kalender",
|
||||
},
|
||||
)
|
||||
if len(download_link):
|
||||
ical_urls.append(urljoin(URL, download_link[0].get("href")))
|
||||
|
||||
|
||||
@@ -2,9 +2,9 @@ import requests
|
||||
from waste_collection_schedule import Collection
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "KreisWirtschaftsBetriebe Goslar"
|
||||
TITLE = "Kreiswirtschaftsbetriebe Goslar"
|
||||
DESCRIPTION = "Source for kwb-goslar.de waste collection."
|
||||
URL = "https://www.kwb-goslar.de/Abfallwirtschaft/Abfuhr/"
|
||||
URL = "https://www.kwb-goslar.de"
|
||||
TEST_CASES = {
|
||||
"Berliner Straße (Clausthal-Zellerfeld)": {"pois": "2523.602"},
|
||||
"Braunschweiger Straße (Seesen)": {"pois": "2523.409"},
|
||||
|
||||
@@ -1,19 +1,25 @@
|
||||
from datetime import date
|
||||
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from datetime import date
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "KWU Entsorgung Landkreis Oder-Spree"
|
||||
DESCRIPTION = "Source for KWU Entsorgung, Germany"
|
||||
URL = "https://www.kwu-entsorgung.de/"
|
||||
|
||||
TEST_CASES = {
|
||||
"Erkner": {"city": "Erkner", "street": "Heinrich-Heine-Straße", "number": "11"},
|
||||
"Bad Saarow": {"city": "Bad Saarow", "street": "Ahornallee", "number": "1"}
|
||||
"Bad Saarow": {"city": "Bad Saarow", "street": "Ahornallee", "number": "1"},
|
||||
}
|
||||
|
||||
HEADERS = {"user-agent": "Mozilla/5.0 (xxxx Windows NT 10.0; Win64; x64)"}
|
||||
ICON_MAP = {
|
||||
"Restabfall": "mdi:trash-can-outline",
|
||||
"Gelber Sack": "mdi:recycle",
|
||||
"Papiertonne": "mdi:package-variant",
|
||||
"Biotonne": "mdi:food-apple-outline",
|
||||
}
|
||||
|
||||
|
||||
class Source:
|
||||
@@ -22,16 +28,11 @@ class Source:
|
||||
self._street = street
|
||||
self._number = number
|
||||
self._ics = ICS()
|
||||
self._iconMap = {
|
||||
"Restabfall": "mdi:trash-can-outline",
|
||||
"Gelber Sack" : "mdi:recycle",
|
||||
"Papiertonne" : "mdi:package-variant",
|
||||
"Biotonne": "mdi:food-apple-outline",
|
||||
}
|
||||
|
||||
def fetch(self):
|
||||
session = requests.Session()
|
||||
|
||||
|
||||
params = {
|
||||
"city": self._city,
|
||||
"street": self._street,
|
||||
@@ -39,62 +40,94 @@ class Source:
|
||||
"direct": "true",
|
||||
}
|
||||
|
||||
r = requests.get("https://www.kwu-entsorgung.de/inc/wordpress/kal_objauswahl.php", headers=HEADERS)
|
||||
r = requests.get(
|
||||
"https://kalender.kwu-entsorgung.de",
|
||||
headers=HEADERS,
|
||||
verify=False
|
||||
)
|
||||
|
||||
parsed_html = BeautifulSoup(r.text, "html.parser")
|
||||
Orte = parsed_html.find_all('option')
|
||||
Orte = parsed_html.find_all("option")
|
||||
|
||||
for Ort in Orte:
|
||||
if self._city in Ort.text:
|
||||
OrtValue = Ort['value']
|
||||
OrtValue = Ort["value"]
|
||||
break
|
||||
|
||||
r = requests.get("https://www.kwu-entsorgung.de/inc/wordpress/kal_str2ort.php", params={"ort": OrtValue}, headers=HEADERS)
|
||||
r = requests.get(
|
||||
"https://kalender.kwu-entsorgung.de/kal_str2ort.php",
|
||||
params={"ort": OrtValue},
|
||||
headers=HEADERS,
|
||||
verify=False
|
||||
)
|
||||
|
||||
parsed_html = BeautifulSoup(r.text, "html.parser")
|
||||
Strassen = parsed_html.find_all('option')
|
||||
Strassen = parsed_html.find_all("option")
|
||||
|
||||
for Strasse in Strassen:
|
||||
if self._street in Strasse.text:
|
||||
StrasseValue = Strasse['value']
|
||||
StrasseValue = Strasse["value"]
|
||||
break
|
||||
|
||||
r = requests.get("https://www.kwu-entsorgung.de/inc/wordpress/kal_str2ort.php", params={"ort": OrtValue, "strasse": StrasseValue}, headers=HEADERS)
|
||||
parsed_html = BeautifulSoup(r.text, "html.parser")
|
||||
Objekte = parsed_html.find_all('option')
|
||||
r = requests.get(
|
||||
"https://kalender.kwu-entsorgung.de/kal_str2ort.php",
|
||||
params={"ort": OrtValue, "strasse": StrasseValue},
|
||||
headers=HEADERS,
|
||||
verify=False
|
||||
)
|
||||
|
||||
for Objekt in Objekte:
|
||||
if self._number in Objekt.text:
|
||||
ObjektValue = Objekt['value']
|
||||
parsed_html = BeautifulSoup(r.text, "html.parser")
|
||||
objects = parsed_html.find_all("option")
|
||||
|
||||
for obj in objects:
|
||||
if self._number in obj.text:
|
||||
ObjektValue = obj["value"]
|
||||
break
|
||||
|
||||
r = requests.post("https://www.kwu-entsorgung.de/inc/wordpress/kal_uebersicht-2020.php", data={"ort": OrtValue, "strasse": StrasseValue, "objekt": ObjektValue, "jahr": date.today().year}, headers=HEADERS)
|
||||
|
||||
r = requests.post(
|
||||
"https://kalender.kwu-entsorgung.de/kal_uebersicht-2023.php",
|
||||
data={
|
||||
"ort": OrtValue,
|
||||
"strasse": StrasseValue,
|
||||
"objekt": ObjektValue,
|
||||
"jahr": date.today().year
|
||||
},
|
||||
headers=HEADERS,
|
||||
verify=False
|
||||
)
|
||||
|
||||
parsed_html = BeautifulSoup(r.text, "html.parser")
|
||||
Links = parsed_html.find_all('a')
|
||||
Links = parsed_html.find_all("a")
|
||||
|
||||
for Link in Links:
|
||||
if 'ICal herunterladen' in Link.text:
|
||||
ics_url = Link['href']
|
||||
if "ICal herunterladen" in Link.text:
|
||||
ics_url = Link["href"]
|
||||
|
||||
if ics_url is None:
|
||||
raise Exception(f"ics url not found")
|
||||
raise Exception("ics url not found")
|
||||
|
||||
# get ics file
|
||||
r = session.get(ics_url, headers=HEADERS)
|
||||
r = session.get(ics_url, headers=HEADERS, verify=False)
|
||||
r.raise_for_status()
|
||||
|
||||
# parse ics file
|
||||
dates = self._ics.convert(r.text)
|
||||
|
||||
entries = []
|
||||
#for d in dates:
|
||||
# for d in dates:
|
||||
# entries.append(Collection(d[0], d[1]))
|
||||
#return entries
|
||||
# return entries
|
||||
for d in dates:
|
||||
# _LOGGER.error(d)
|
||||
waste_type = d[1].strip()
|
||||
next_pickup_date = d[0]
|
||||
|
||||
entries.append(Collection(date=next_pickup_date, t=waste_type, icon=self._iconMap.get(waste_type,"mdi:trash-can")))
|
||||
entries.append(
|
||||
Collection(
|
||||
date=next_pickup_date,
|
||||
t=waste_type,
|
||||
icon=ICON_MAP.get(waste_type, "mdi:trash-can"),
|
||||
)
|
||||
)
|
||||
|
||||
return entries
|
||||
|
||||
|
||||
@@ -3,18 +3,10 @@ import datetime
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "Source for Rhön Grabfeld"
|
||||
TITLE = "Landkreis Rhön Grabfeld"
|
||||
DESCRIPTION = "Source for Rhönn Grabfeld uses service by offizium."
|
||||
URL = 'https://fs-api-rg.offizium.com/abfalltermine'
|
||||
ICON_MAP = {
|
||||
"Restmüll/Gelber Sack/Biotonne": "mdi:trash-can",
|
||||
"Papiersammlung": "mdi:package-variant",
|
||||
"Problemmüllsammlung": "mdi:biohazard"
|
||||
}
|
||||
EVENT_BLACKLIST = ['Wertstoffhof Mellrichstadt',
|
||||
'Wertstoffhof Bad Königshofen', 'Wertstoffzentrum Bad Neustadt',
|
||||
'Wertstoffsammelstelle Ostheim',
|
||||
'Wertstoffsammelstelle Bischofsheim']
|
||||
URL = "https://www.abfallinfo-rhoen-grabfeld.de/"
|
||||
COUNTRY = "de"
|
||||
TEST_CASES = {
|
||||
"City only": {"city": "Ostheim"},
|
||||
"City + District": {"city": "Ostheim", "district": "Oberwaldbehrungen"},
|
||||
@@ -22,6 +14,19 @@ TEST_CASES = {
|
||||
"empty": {}
|
||||
}
|
||||
|
||||
API_URL = 'https://fs-api-rg.offizium.com/abfalltermine'
|
||||
|
||||
EVENT_BLACKLIST = ['Wertstoffhof Mellrichstadt',
|
||||
'Wertstoffhof Bad Königshofen', 'Wertstoffzentrum Bad Neustadt',
|
||||
'Wertstoffsammelstelle Ostheim',
|
||||
'Wertstoffsammelstelle Bischofsheim']
|
||||
|
||||
ICON_MAP = {
|
||||
"Restmüll/Gelber Sack/Biotonne": "mdi:trash-can",
|
||||
"Papiersammlung": "mdi:package-variant",
|
||||
"Problemmüllsammlung": "mdi:biohazard"
|
||||
}
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, city: str = None, district: str = None):
|
||||
@@ -31,7 +36,7 @@ class Source:
|
||||
def fetch(self):
|
||||
now = datetime.datetime.now().date()
|
||||
|
||||
r = requests.get(URL, params={
|
||||
r = requests.get(API_URL, params={
|
||||
"stadt": self._city,
|
||||
"ortsteil": self._district
|
||||
})
|
||||
|
||||
@@ -4,12 +4,9 @@ from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
TITLE = "Landkreis-Wittmund.de"
|
||||
TITLE = "Landkreis Wittmund"
|
||||
DESCRIPTION = "Source for Landkreis Wittmund waste collection."
|
||||
URL = "https://www.landkreis-wittmund.de/Leben-Wohnen/Wohnen/Abfall/Abfuhrkalender/"
|
||||
AUTOCOMPLETE_URL = "https://www.landkreis-wittmund.de/output/autocomplete.php?out=json&type=abto&mode=&select=2&refid={}&term="
|
||||
DOWNLOAD_URL = "https://www.landkreis-wittmund.de/output/options.php?ModID=48&call=ical&ArtID%5B0%5D=3105.1&ArtID%5B1%5D=1.4&ArtID%5B2%5D=1.2&ArtID%5B3%5D=1.3&ArtID%5B4%5D=1.1&pois={}&alarm=0"
|
||||
|
||||
URL = "https://www.landkreis-wittmund.de"
|
||||
TEST_CASES = {
|
||||
"CityWithoutStreet": {
|
||||
"city": "Werdum",
|
||||
@@ -20,6 +17,11 @@ TEST_CASES = {
|
||||
},
|
||||
}
|
||||
|
||||
API_URL = "https://www.landkreis-wittmund.de/Leben-Wohnen/Wohnen/Abfall/Abfuhrkalender/"
|
||||
AUTOCOMPLETE_URL = "https://www.landkreis-wittmund.de/output/autocomplete.php?out=json&type=abto&mode=&select=2&refid={}&term="
|
||||
DOWNLOAD_URL = "https://www.landkreis-wittmund.de/output/options.php?ModID=48&call=ical&ArtID%5B0%5D=3105.1&ArtID%5B1%5D=1.4&ArtID%5B2%5D=1.2&ArtID%5B3%5D=1.3&ArtID%5B4%5D=1.1&pois={}&alarm=0"
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, city, street=None):
|
||||
self._city = city
|
||||
@@ -36,11 +38,11 @@ class Source:
|
||||
return tag['value'] != "" and tag.string == self._city
|
||||
|
||||
def fetch_city_id(self, cityName):
|
||||
r = requests.get(URL)
|
||||
r = requests.get(API_URL)
|
||||
if not r.ok:
|
||||
raise Exception(
|
||||
"Error: failed to fetch url: {}".format(
|
||||
URL
|
||||
API_URL
|
||||
)
|
||||
)
|
||||
|
||||
@@ -64,7 +66,7 @@ class Source:
|
||||
|
||||
def fetch_street_id(self, cityId, streetName):
|
||||
r = requests.get(AUTOCOMPLETE_URL.format(cityId, streetName), headers={
|
||||
"Referer": URL
|
||||
"Referer": API_URL
|
||||
})
|
||||
|
||||
if not r.ok:
|
||||
@@ -100,7 +102,7 @@ class Source:
|
||||
|
||||
def fetch_ics(self, url):
|
||||
r = requests.get(url, headers={
|
||||
"Referer": URL
|
||||
"Referer": API_URL
|
||||
})
|
||||
|
||||
if not r.ok:
|
||||
|
||||
@@ -8,7 +8,7 @@ from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "Lerum Vatten och Avlopp"
|
||||
DESCRIPTION = "Source for Lerum Vatten och Avlopp waste collection."
|
||||
URL = "https://vatjanst.lerum.se/FutureWeb/SimpleWastePickup/SimpleWastePickup"
|
||||
URL = "https://vatjanst.lerum.se"
|
||||
TEST_CASES = {
|
||||
"PRO": {"street_address": "Floda stationsväg 5, Floda"},
|
||||
"Polisen": {"street_address": "Göteborgsvägen 16, Lerum"},
|
||||
|
||||
@@ -9,8 +9,7 @@ TITLE = "London Borough of Lewisham"
|
||||
DESCRIPTION = (
|
||||
"Source for services from the London Borough of Lewisham"
|
||||
)
|
||||
|
||||
URL = "lewisham.gov.uk"
|
||||
URL = "https://lewisham.gov.uk"
|
||||
TEST_CASES = {
|
||||
"houseNumber": {"post_code": "SE41LR", "number": 4},
|
||||
"houseName": {"post_code": "SE233TE", "name": "The Haven"},
|
||||
|
||||
@@ -5,16 +5,16 @@ import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "Abfall Lindau"
|
||||
TITLE = "Lindau"
|
||||
DESCRIPTION = "Source for Lindau waste collection."
|
||||
URL = "https://www.lindau.ch/abfalldaten"
|
||||
URL = "https://www.lindau.ch"
|
||||
TEST_CASES = {
|
||||
"Tagelswangen": {"city": "Tagelswangen"},
|
||||
"Grafstal": {"city": "190"},
|
||||
}
|
||||
|
||||
|
||||
IconMap = {
|
||||
ICON_MAP = {
|
||||
"kehricht": "mdi:trash-can",
|
||||
"grungut": "mdi:leaf",
|
||||
"hackseldienst": "mdi:leaf",
|
||||
@@ -51,7 +51,7 @@ class Source:
|
||||
Collection(
|
||||
date=next_pickup_date,
|
||||
t=waste_type,
|
||||
icon=IconMap.get(waste_type_sorted, "mdi:trash-can"),
|
||||
icon=ICON_MAP.get(waste_type_sorted, "mdi:trash-can"),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -6,13 +6,12 @@ from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "Landkreis Schwäbisch Hall"
|
||||
DESCRIPTION = "Source for lrasha.de - Landkreis Schwäbisch Hall"
|
||||
URL = "http://exchange.cmcitymedia.de/landkreis-schwaebisch-hallt3/wasteCalendarExport.php?location="
|
||||
# https://www.lrasha.de/de/buergerservice/abfallwirtschaft/abfallkalender
|
||||
|
||||
URL = "https://www.lrasha.de"
|
||||
TEST_CASES = {
|
||||
"Ilshofen": {"location": "114"}
|
||||
}
|
||||
|
||||
API_URL = "http://exchange.cmcitymedia.de/landkreis-schwaebisch-hallt3/wasteCalendarExport.php?location="
|
||||
HEADERS = {"user-agent": "Mozilla/5.0 (xxxx Windows NT 10.0; Win64; x64)"}
|
||||
|
||||
|
||||
@@ -23,7 +22,7 @@ class Source:
|
||||
|
||||
def fetch(self):
|
||||
# get ics file
|
||||
full_url = URL + str(self._location)
|
||||
full_url = API_URL + str(self._location)
|
||||
r = requests.get(full_url, headers=HEADERS)
|
||||
r.raise_for_status()
|
||||
|
||||
|
||||
@@ -1,20 +1,19 @@
|
||||
import logging
|
||||
from datetime import datetime
|
||||
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from urllib.parse import urlsplit, parse_qs
|
||||
import logging
|
||||
|
||||
TITLE = "manchester.gov.uk"
|
||||
TITLE = "Manchester City Council"
|
||||
DESCRIPTION = "Source for bin collection services for Manchester City Council, UK."
|
||||
URL = "https://www.manchester.gov.uk/bincollections/"
|
||||
URL = "https://www.manchester.gov.uk"
|
||||
TEST_CASES = {
|
||||
"domestic": {'uprn': '000077065560'},
|
||||
"domestic": {"uprn": "000077065560"},
|
||||
}
|
||||
|
||||
ICONS = {
|
||||
API_URL = "https://www.manchester.gov.uk/bincollections/"
|
||||
ICON_MAP = {
|
||||
"Black / Grey Bin": "mdi:trash-can",
|
||||
"Blue Bin": "mdi:recycle",
|
||||
"Brown Bin": "mdi:glass-fragile",
|
||||
@@ -25,25 +24,15 @@ _LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(
|
||||
self, uprn: int = None
|
||||
):
|
||||
def __init__(self, uprn: int):
|
||||
self._uprn = uprn
|
||||
if not self._uprn:
|
||||
_LOGGER.error(
|
||||
"uprn must be provided in config"
|
||||
)
|
||||
self._session = requests.Session()
|
||||
|
||||
def fetch(self):
|
||||
entries = []
|
||||
|
||||
r = requests.post(
|
||||
URL,
|
||||
data={
|
||||
"mcc_bin_dates_uprn": self._uprn,
|
||||
"mcc_bin_dates_submit": "Go"
|
||||
},
|
||||
API_URL,
|
||||
data={"mcc_bin_dates_uprn": self._uprn, "mcc_bin_dates_submit": "Go"},
|
||||
)
|
||||
|
||||
soup = BeautifulSoup(r.text, features="html.parser")
|
||||
@@ -53,21 +42,18 @@ class Source:
|
||||
date = result.find("p", {"class": "caption"})
|
||||
dates = []
|
||||
dates.append(str(date.text).replace("Next collection ", "", 1))
|
||||
for date in result.find_all('li'):
|
||||
for date in result.find_all("li"):
|
||||
dates.append(date.text)
|
||||
img_tag = result.find("img")
|
||||
collection_type = img_tag["alt"]
|
||||
for current_date in dates:
|
||||
try:
|
||||
date = datetime.strptime(current_date, "%A %d %b %Y").date()
|
||||
entries.append(
|
||||
Collection(
|
||||
date=date,
|
||||
t=collection_type,
|
||||
icon=ICONS[collection_type],
|
||||
icon=ICON_MAP[collection_type],
|
||||
)
|
||||
)
|
||||
except ValueError:
|
||||
_LOGGER.error(f"Skipped {current_date} as it does not match time format")
|
||||
|
||||
return entries
|
||||
|
||||
@@ -0,0 +1,103 @@
|
||||
import html
|
||||
import logging
|
||||
import random
|
||||
import re
|
||||
import string
|
||||
from datetime import datetime
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
TITLE = "Abfallwirtschaftsbetrieb Landkreis Ahrweiler"
|
||||
URL = "https://www.meinawb.de"
|
||||
DESCRIPTION = "Bin collection service from Kreis Ahrweiler/Germany"
|
||||
API_URL = "https://extdienste01.koblenz.de/WasteManagementAhrweiler/WasteManagementServlet"
|
||||
|
||||
ICON_MAP = {
|
||||
"Restabfall": "mdi:trash-can",
|
||||
"Restabfall Plus": "mdi:trash-can",
|
||||
"Bioabfall": "mdi:leaf",
|
||||
"Altpapier": "mdi:package-variant",
|
||||
"Verpackungen": "mdi:recycle",
|
||||
"Grünabfall / Weihnachtsbäume": "mdi:forest",
|
||||
}
|
||||
TYPES = {
|
||||
"RM": "Restabfall",
|
||||
"RG": "Restabfall Plus",
|
||||
"BM": "Bioabfall",
|
||||
"PA": "Altpapier",
|
||||
"GT": "Verpackungen",
|
||||
"GS": "Grünabfall / Weihnachtsbäume",
|
||||
}
|
||||
|
||||
TEST_CASES = {
|
||||
"Oberzissen": {"city": "Oberzissen", "street": "Lindenstrasse", "house_number": "1"},
|
||||
"Niederzissen": {"city": "Niederzissen", "street": "Brohltalstrasse", "house_number": "189"},
|
||||
"Bad Neuenahr": {"city": "Bad Neuenahr-Ahrweiler", "street": "Hauptstrasse", "house_number": "91",
|
||||
"address_suffix": "A"},
|
||||
}
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, city, street, house_number, address_suffix=""):
|
||||
self._city = city
|
||||
self._street = street
|
||||
self._house_number = house_number
|
||||
self._address_suffix = address_suffix
|
||||
self._boundary = "WebKitFormBoundary" + "".join(random.sample(string.ascii_letters + string.digits, 16))
|
||||
|
||||
def __str__(self):
|
||||
return f"{self._city} {self._street} {self._house_number} {self._address_suffix}"
|
||||
|
||||
@staticmethod
|
||||
def _parse_data(data, boundary):
|
||||
result = ""
|
||||
for key, value in data.items():
|
||||
result += f'------{boundary}\r\nContent-Disposition: form-data; name="{key}"\r\n\r\n{value}\r\n'
|
||||
result += f"------{boundary}--\r\n"
|
||||
return result.encode()
|
||||
|
||||
@staticmethod
|
||||
def _parse_response_input(text):
|
||||
parsed = re.findall("<INPUT\\sNAME=\"([^\"]+?)\"\\sID=\"[^\"]+?\"(?:\\sVALUE=\"([^\"]*?)\")?", text)
|
||||
return {k: v for k, v in parsed}
|
||||
|
||||
def _address(self):
|
||||
return {"Ort": self._city, "Strasse": self._street, "Hausnummer": self._house_number,
|
||||
"Hausnummerzusatz": self._address_suffix}
|
||||
|
||||
def _headers(self):
|
||||
return {'Content-Type': f'multipart/form-data; boundary=----{self._boundary}'}
|
||||
|
||||
def _payload(self, last_request, action="", period="", **kwargs):
|
||||
payload = self._parse_response_input(last_request)
|
||||
payload.update({"SubmitAction": action, **kwargs})
|
||||
if period:
|
||||
payload.update({"Zeitraum": html.unescape(period)})
|
||||
return self._parse_data(payload, self._boundary)
|
||||
|
||||
def _get_dates(self, session, init_request, calendar=""):
|
||||
kwargs = {"Ort": self._city, "Strasse": ""}
|
||||
payload = self._payload(init_request, action="CITYCHANGED", period=calendar, **kwargs)
|
||||
city_response = session.post(API_URL, headers=self._headers(), data=payload)
|
||||
payload = self._payload(city_response.text, action="forward", period=calendar, **self._address())
|
||||
final_response = session.post(API_URL, headers=self._headers(), data=payload)
|
||||
if error := re.findall("informationItemsText_1\">([^<]+?)<", final_response.text):
|
||||
_LOGGER.warning(f"{self} - {html.unescape(error[0])}")
|
||||
return []
|
||||
return re.findall('<P ID="TermineDatum([0-9A-Z]+)_\\d+">[A-Z][a-z]. ([0-9.]{10}) </P>', final_response.text)
|
||||
|
||||
def fetch(self):
|
||||
session = requests.Session()
|
||||
init_request = session.get(f"{API_URL}?SubmitAction=wasteDisposalServices&InFrameMode=true").text
|
||||
if calendars := re.findall('NAME="Zeitraum" VALUE=\"([^\"]+?)\"', init_request):
|
||||
dates = [date for calendar in calendars for date in self._get_dates(session, init_request, calendar)]
|
||||
else:
|
||||
dates = self._get_dates(session, init_request)
|
||||
entries = []
|
||||
for bin_type, date in dates:
|
||||
name = TYPES[next(x for x in list(TYPES) if x in bin_type)]
|
||||
entries.append(Collection(datetime.strptime(date, "%d.%m.%Y").date(), name, ICON_MAP[name]))
|
||||
return entries
|
||||
@@ -8,7 +8,7 @@ from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "Melton City Council"
|
||||
DESCRIPTION = "Source for Melton City Council rubbish collection."
|
||||
URL = "https://www.melton.vic.gov.au/My-Area"
|
||||
URL = "https://www.melton.vic.gov.au"
|
||||
TEST_CASES = {
|
||||
"Tuesday A": {"street_address": "23 PILBARA AVENUE BURNSIDE 3023"},
|
||||
"Tuesday B": {"street_address": "29 COROWA CRESCENT BURNSIDE 3023"},
|
||||
@@ -45,10 +45,9 @@ class Source:
|
||||
addressSearchApiResults["Items"] is None
|
||||
or len(addressSearchApiResults["Items"]) < 1
|
||||
):
|
||||
_LOGGER.error(
|
||||
raise Exception(
|
||||
f"Address search for '{self._street_address}' returned no results. Check your address on https://www.melton.vic.gov.au/My-Area"
|
||||
)
|
||||
return []
|
||||
|
||||
addressSearchTopHit = addressSearchApiResults["Items"][0]
|
||||
_LOGGER.debug("Address search top hit: %s", addressSearchTopHit)
|
||||
|
||||
@@ -5,21 +5,9 @@ import time
|
||||
from datetime import datetime
|
||||
from waste_collection_schedule import Collection
|
||||
|
||||
TITLE = 'middlesbrough.gov.uk'
|
||||
DESCRIPTION = (
|
||||
'Source for waste collection services for Middlesbrough Council'
|
||||
)
|
||||
URL = 'https://www.middlesbrough.gov.uk/bin-collection-dates'
|
||||
|
||||
|
||||
HEADERS = {
|
||||
"user-agent": "Mozilla/5.0",
|
||||
}
|
||||
|
||||
COOKIES = {
|
||||
|
||||
}
|
||||
|
||||
TITLE = 'Middlesbrough Council'
|
||||
DESCRIPTION = 'Source for waste collection services for Middlesbrough Council'
|
||||
URL = 'https://www.middlesbrough.gov.uk'
|
||||
TEST_CASES = {
|
||||
"Tollesby Road - number" : {"uprn": 100110140843},
|
||||
"Tollesby Road - string" : {"uprn": "100110140843"},
|
||||
@@ -33,6 +21,13 @@ API_URLS = {
|
||||
'schedule': 'https://my.middlesbrough.gov.uk/apibroker/runLookup?id=5d78f40439054&repeat_against=&noRetry=true&getOnlyTokens=undefined&log_id=&app_name=AF-Renderer::Self&'
|
||||
}
|
||||
|
||||
HEADERS = {
|
||||
"user-agent": "Mozilla/5.0",
|
||||
}
|
||||
|
||||
COOKIES = {
|
||||
}
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
class Source:
|
||||
|
||||
@@ -33,15 +33,15 @@ from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
# those bins, only for the so called "Fyrfack" bins (meaning four slots).
|
||||
#
|
||||
|
||||
TITLE = "Ronneby Miljöteknik Sophämntning"
|
||||
TITLE = "Ronneby Miljöteknik"
|
||||
DESCRIPTION = "Source for Ronneby Miljöteknik waste collection."
|
||||
URL = (
|
||||
"http://www.fyrfackronneby.se/hamtningskalender/"
|
||||
)
|
||||
URL = "http://www.fyrfackronneby.se"
|
||||
TEST_CASES = {
|
||||
"Home": {"street_address": "Hjortsbergavägen 16, Johannishus"}
|
||||
}
|
||||
|
||||
API_URL = "http://www.fyrfackronneby.se/hamtningskalender/"
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, street_address):
|
||||
|
||||
@@ -9,7 +9,7 @@ from pprint import pprint
|
||||
|
||||
TITLE = "Min Renovasjon"
|
||||
DESCRIPTION = "Source for Norkart Komtek MinRenovasjon (Norway)."
|
||||
URL = "https://www.norkart.no/komtek/renovasjon/"
|
||||
URL = "https://www.norkart.no"
|
||||
|
||||
# **street_code:** \
|
||||
# **county_id:** \
|
||||
@@ -29,16 +29,9 @@ TEST_CASES = {
|
||||
}
|
||||
}
|
||||
|
||||
BASE_URL = "https://komteksky.norkart.no/komtek.renovasjonwebapi/api/"
|
||||
API_URL = "https://komteksky.norkart.no/komtek.renovasjonwebapi/api/"
|
||||
APP_KEY = "AE13DEEC-804F-4615-A74E-B4FAC11F0A30"
|
||||
|
||||
class Source:
|
||||
def __init__(self, street_name, house_number, street_code, county_id):
|
||||
self._street_name = street_name
|
||||
self._house_number = house_number
|
||||
self._street_code = street_code
|
||||
self._county_id = county_id
|
||||
self._icon_map = {
|
||||
ICON_MAP = {
|
||||
"": "mdi:trash-can",
|
||||
"brush": "mdi:trash-can",
|
||||
"elektriskogelektronisk": "mdi:chip",
|
||||
@@ -56,11 +49,16 @@ class Source:
|
||||
"restavfall": "mdi:trash-can",
|
||||
"drikkekartong": "mdi:newspaper-variant-multiple",
|
||||
"papppapirdrikkekartong": "mdi:newspaper-variant-multiple",
|
||||
"trevirke": "mdi:trash-can"
|
||||
|
||||
|
||||
"trevirke": "mdi:trash-can",
|
||||
}
|
||||
|
||||
class Source:
|
||||
def __init__(self, street_name, house_number, street_code, county_id):
|
||||
self._street_name = street_name
|
||||
self._house_number = house_number
|
||||
self._street_code = street_code
|
||||
self._county_id = county_id
|
||||
|
||||
def fetch(self):
|
||||
headers = {
|
||||
'Kommunenr': str(self._county_id),
|
||||
@@ -69,15 +67,13 @@ class Source:
|
||||
}
|
||||
args = {}
|
||||
|
||||
r = requests.get(BASE_URL + 'fraksjoner', params = args, headers = headers)
|
||||
r = requests.get(API_URL + 'fraksjoner', params = args, headers = headers)
|
||||
|
||||
type = {}
|
||||
for f in json.loads(r.content):
|
||||
# pprint(f)
|
||||
icon = "mdi:trash-can"
|
||||
icon_name = re.sub(r"^.*?/(\w+)\.\w{3,4}$", "\\1", f['Ikon'])
|
||||
if icon_name in self._icon_map:
|
||||
icon = self._icon_map[icon_name]
|
||||
icon = ICON_MAP.get(icon_name, "mdi:trash-can")
|
||||
type[f['Id']] = {
|
||||
'name': f['Navn'],
|
||||
'image': f['Ikon'],
|
||||
@@ -88,10 +84,9 @@ class Source:
|
||||
'gatenavn': self._street_name,
|
||||
'husnr': self._house_number,
|
||||
'gatekode': self._street_code,
|
||||
|
||||
}
|
||||
|
||||
r = requests.get(BASE_URL + 'tommekalender', params = args, headers = headers)
|
||||
r = requests.get(API_URL + 'tommekalender', params = args, headers = headers)
|
||||
|
||||
entries = []
|
||||
for f in json.loads(r.content):
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import logging
|
||||
from datetime import datetime
|
||||
import re
|
||||
from datetime import datetime
|
||||
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
@@ -8,7 +8,7 @@ from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "Macedon Ranges Shire Council"
|
||||
DESCRIPTION = "Source for Macedon Ranges Shire Council rubbish collection."
|
||||
URL = "https://www.mrsc.vic.gov.au/Live-Work/Bins-Rubbish-Recycling/Bins-and-collection-days/Bin-collection-days"
|
||||
URL = "https://www.mrsc.vic.gov.au"
|
||||
TEST_CASES = {
|
||||
"Macedon IGA": {"street_address": "20 Victoria Street, Macedon"},
|
||||
"ALDI Gisborne": {"street_address": "45 Aitken Street, Gisborne"},
|
||||
@@ -45,10 +45,9 @@ class Source:
|
||||
addressSearchApiResults["Items"] is None
|
||||
or len(addressSearchApiResults["Items"]) < 1
|
||||
):
|
||||
_LOGGER.error(
|
||||
raise Exception(
|
||||
f"Address search for '{self._street_address}' returned no results. Check your address on https://www.mrsc.vic.gov.au/Live-Work/Bins-Rubbish-Recycling/Bins-and-collection-days/Bin-collection-days"
|
||||
)
|
||||
return []
|
||||
|
||||
addressSearchTopHit = addressSearchApiResults["Items"][0]
|
||||
_LOGGER.debug("Address search top hit: %s", addressSearchTopHit)
|
||||
@@ -72,10 +71,12 @@ class Source:
|
||||
waste_type = article.h3.string
|
||||
icon = ICON_MAP.get(waste_type, "mdi:trash-can")
|
||||
next_pickup = article.find(class_="next-service").string.strip()
|
||||
if re.match("[^\s]* \d{1,2}\/\d{1,2}\/\d{4}", next_pickup):
|
||||
if re.match(r"[^\s]* \d{1,2}\/\d{1,2}\/\d{4}", next_pickup):
|
||||
next_pickup_date = datetime.strptime(
|
||||
next_pickup.split(sep=" ")[1], "%d/%m/%Y"
|
||||
).date()
|
||||
entries.append(Collection(date=next_pickup_date, t=waste_type, icon=icon))
|
||||
entries.append(
|
||||
Collection(date=next_pickup_date, t=waste_type, icon=icon)
|
||||
)
|
||||
|
||||
return entries
|
||||
|
||||
@@ -1,76 +0,0 @@
|
||||
import json
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "Abfallsammlung Münchenstein"
|
||||
DESCRIPTION = "Source for Muenchenstein waste collection."
|
||||
URL = "https://www.muenchenstein.ch/abfallsammlung"
|
||||
TEST_CASES = {
|
||||
"Abfuhrkreis Ost": {"waste_district": "Abfuhrkreis Ost"},
|
||||
"Abfuhrkreis West": {"waste_district": "492"},
|
||||
}
|
||||
|
||||
|
||||
IconMap = {
|
||||
"kehricht": "mdi:trash-can",
|
||||
"hackseldienst": "mdi:leaf",
|
||||
"papierabfuhr": "mdi:newspaper-variant-multiple-outline",
|
||||
"kartonabfuhr": "mdi:package-variant",
|
||||
"altmetalle": "mdi:nail",
|
||||
}
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, waste_district):
|
||||
self._waste_district = waste_district
|
||||
|
||||
def fetch(self):
|
||||
response = requests.get(URL)
|
||||
|
||||
html = BeautifulSoup(response.text, "html.parser")
|
||||
|
||||
table = html.find("table", attrs={"id": "icmsTable-abfallsammlung"})
|
||||
data = json.loads(table.attrs["data-entities"])
|
||||
|
||||
entries = []
|
||||
for item in data["data"]:
|
||||
if (
|
||||
self._waste_district in item["abfallkreisIds"]
|
||||
or self._waste_district in item["abfallkreisNameList"]
|
||||
):
|
||||
next_pickup = item["_anlassDate-sort"].split()[0]
|
||||
next_pickup_date = datetime.fromisoformat(next_pickup).date()
|
||||
|
||||
waste_type = BeautifulSoup(item["name"], "html.parser").text
|
||||
waste_type_sorted = BeautifulSoup(item["name-sort"], "html.parser").text
|
||||
|
||||
entries.append(
|
||||
Collection(
|
||||
date=next_pickup_date,
|
||||
t=waste_type,
|
||||
icon=IconMap.get(waste_type_sorted, "mdi:trash-can"),
|
||||
)
|
||||
)
|
||||
|
||||
# Collection of "Kehricht und Kleinsperrgut brennbar" are not listed with dates as events on website.
|
||||
# Instead it states the day of the week for each waste district: tuesday for east and friday for west
|
||||
# So we're going to set those collections programmatically for the next 4 occurrences
|
||||
weekday_collection = 2 if self._waste_district == 'Abfuhrkreis Ost' or self._waste_district == 491 else 4
|
||||
weekday_today = datetime.now().isoweekday()
|
||||
for x in range(4):
|
||||
days_to_pickup = (x * 7) + ((weekday_collection - weekday_today) % 7)
|
||||
next_pickup_date = (datetime.now() + timedelta(days=days_to_pickup)).date()
|
||||
waste_type = "Kehricht und Kleinsperrgut brennbar"
|
||||
|
||||
entries.append(
|
||||
Collection(
|
||||
date=next_pickup_date,
|
||||
t=waste_type,
|
||||
icon=IconMap.get(waste_type_sorted, "mdi:trash-can"),
|
||||
)
|
||||
)
|
||||
|
||||
return entries
|
||||
@@ -8,7 +8,7 @@ TITLE = "North Adelaide Waste Management Authority"
|
||||
DESCRIPTION = (
|
||||
"Source for nawma.sa.gov.au (Salisbury, Playford, and Gawler South Australia)."
|
||||
)
|
||||
URL = "http://www.nawma.sa.gov.au"
|
||||
URL = "https://www.nawma.sa.gov.au"
|
||||
TEST_CASES = {
|
||||
"128 Bridge Road": {
|
||||
"street_number": "128",
|
||||
|
||||
@@ -1,62 +1,47 @@
|
||||
import logging
|
||||
from datetime import datetime
|
||||
import re
|
||||
from datetime import datetime
|
||||
import requests
|
||||
# These lines are needed to suppress the InsecureRequestWarning resulting from the POST verify=False option
|
||||
# With verify=True the POST fails due to a SSLCertVerificationError.
|
||||
import urllib3
|
||||
from waste_collection_schedule import Collection
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
urllib3.disable_warnings()
|
||||
# The following links may provide a better way of dealing with this, as using verify=False is not ideal:
|
||||
# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings
|
||||
# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
TITLE = "newcastle.gov.uk"
|
||||
DESCRIPTION = (
|
||||
"""Source for waste collection services for Newcastle City Council"""
|
||||
)
|
||||
URL = "https://community.newcastle.gov.uk/my-neighbourhood/ajax/getBinsNew.php"
|
||||
REGEX = "<strong>(Green|Blue|Brown) [bB]in \\((Domestic|Recycling|Garden)( Waste)?\\) details: <\\/strong><br\\/>" \
|
||||
"collection day : [a-zA-Z]*day<br\\/>" \
|
||||
"Next collection : ([0-9]{2}-[A-Za-z]+-[0-9]{4})"
|
||||
ICONS = {
|
||||
TITLE = "Newcastle City Council"
|
||||
DESCRIPTION = "Source for waste collection services for Newcastle City Council"
|
||||
URL = "https://community.newcastle.gov.uk"
|
||||
TEST_CASES = {"Test_001": {"uprn": "004510053797"}, "Test_002": {"uprn": 4510053797}}
|
||||
|
||||
|
||||
API_URL = "https://community.newcastle.gov.uk/my-neighbourhood/ajax/getBinsNew.php"
|
||||
REGEX = (
|
||||
"[Green|Blue|Brown] [Bb]in \(([A-Za-z]+)( Waste)?\) .*? ([0-9]{2}-[A-Za-z]+-[0-9]{4})"
|
||||
)
|
||||
ICON_MAP = {
|
||||
"DOMESTIC": "mdi:trash-can",
|
||||
"RECYCLING": "mdi:recycle",
|
||||
"GARDEN": "mdi:leaf",
|
||||
}
|
||||
|
||||
TEST_CASES = {
|
||||
"Test_001": {"uprn": "004510053797"},
|
||||
"Test_002": {"uprn": 4510053797}
|
||||
}
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, uprn=None):
|
||||
def __init__(self, uprn):
|
||||
self._uprn = str(uprn).zfill(12)
|
||||
if not self._uprn:
|
||||
_LOGGER.error(
|
||||
"uprn must be provided in config"
|
||||
)
|
||||
self._uprn = self._uprn.zfill(12)
|
||||
self._session = requests.Session()
|
||||
|
||||
def fetch(self):
|
||||
entries = []
|
||||
res = requests.get(f"{URL}?uprn={self._uprn}")
|
||||
res = requests.get(f"{API_URL}?uprn={self._uprn}")
|
||||
collections = re.findall(REGEX, res.text)
|
||||
|
||||
for collection in collections:
|
||||
collection_type = collection[1]
|
||||
collection_date = collection[3]
|
||||
collection_type = collection[0]
|
||||
collection_date = collection[2]
|
||||
entries.append(
|
||||
Collection(
|
||||
date=datetime.strptime(collection_date, '%d-%b-%Y').date(),
|
||||
date=datetime.strptime(collection_date, "%d-%b-%Y").date(),
|
||||
t=collection_type,
|
||||
icon=ICONS.get(collection_type.upper()),
|
||||
icon=ICON_MAP.get(collection_type.upper()),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -0,0 +1,79 @@
|
||||
import logging
|
||||
import re
|
||||
from datetime import datetime
|
||||
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "Nillumbik Shire Council"
|
||||
DESCRIPTION = "Source for Nillumbik Shire Council rubbish collection."
|
||||
URL = "https://www.nillumbik.vic.gov.au"
|
||||
TEST_CASES = {"Test": {"street_address": "11 Sunnyside Crescent, WATTLE GLEN, 3096"}}
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
ICON_MAP = {
|
||||
"Food and Green Waste": "mdi:leaf",
|
||||
"Hard Waste": "mdi:sofa",
|
||||
"Recycling": "mdi:recycle",
|
||||
}
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, street_address):
|
||||
self._street_address = street_address
|
||||
|
||||
def fetch(self):
|
||||
session = requests.Session()
|
||||
|
||||
response = session.get(
|
||||
"https://www.nillumbik.vic.gov.au/Residents/Waste-and-recycling/Bin-collection/Check-my-bin-day"
|
||||
)
|
||||
response.raise_for_status()
|
||||
|
||||
response = session.get(
|
||||
"https://www.nillumbik.vic.gov.au/api/v1/myarea/search",
|
||||
params={"keywords": self._street_address},
|
||||
)
|
||||
response.raise_for_status()
|
||||
addressSearchApiResults = response.json()
|
||||
if (
|
||||
addressSearchApiResults["Items"] is None
|
||||
or len(addressSearchApiResults["Items"]) < 1
|
||||
):
|
||||
raise Exception(
|
||||
f"Address search for '{self._street_address}' returned no results. Check your address on https://www.nillumbik.vic.gov.au/Residents/Waste-and-recycling/Bin-collection/Check-my-bin-day"
|
||||
)
|
||||
|
||||
addressSearchTopHit = addressSearchApiResults["Items"][0]
|
||||
_LOGGER.debug("Address search top hit: %s", addressSearchTopHit)
|
||||
|
||||
geolocationid = addressSearchTopHit["Id"]
|
||||
_LOGGER.debug("Geolocationid: %s", geolocationid)
|
||||
|
||||
response = session.get(
|
||||
"https://www.nillumbik.vic.gov.au/ocapi/Public/myarea/wasteservices?ocsvclang=en-AU",
|
||||
params={"geolocationid": geolocationid},
|
||||
)
|
||||
response.raise_for_status()
|
||||
|
||||
wasteApiResult = response.json()
|
||||
_LOGGER.debug("Waste API result: %s", wasteApiResult)
|
||||
|
||||
soup = BeautifulSoup(wasteApiResult["responseContent"], "html.parser")
|
||||
|
||||
entries = []
|
||||
for article in soup.find_all("article"):
|
||||
waste_type = article.h3.string
|
||||
icon = ICON_MAP.get(waste_type, "mdi:trash-can")
|
||||
next_pickup = article.find(class_="next-service").string.strip()
|
||||
if re.match(r"[^\s]* \d{1,2}\/\d{1,2}\/\d{4}", next_pickup):
|
||||
next_pickup_date = datetime.strptime(
|
||||
next_pickup.split(sep=" ")[1], "%d/%m/%Y"
|
||||
).date()
|
||||
entries.append(
|
||||
Collection(date=next_pickup_date, t=waste_type, icon=icon)
|
||||
)
|
||||
|
||||
return entries
|
||||
@@ -5,7 +5,7 @@ import time
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "nottinghamcity.gov.uk"
|
||||
TITLE = "Nottingham City Council"
|
||||
DESCRIPTION = "Source for nottinghamcity.gov.uk services for the city of Nottingham, UK."
|
||||
URL = "https://nottinghamcity.gov.uk"
|
||||
TEST_CASES = {
|
||||
|
||||
@@ -5,7 +5,7 @@ import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from waste_collection_schedule import Collection
|
||||
|
||||
TITLE = "North Somerset.gov.uk"
|
||||
TITLE = "North Somerset Council"
|
||||
DESCRIPTION = "Source for n-somerset.gov.uk services for North Somerset, UK."
|
||||
URL = "n-somerset.gov.uk"
|
||||
|
||||
|
||||
@@ -0,0 +1,52 @@
|
||||
import requests
|
||||
from waste_collection_schedule import Collection
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "Abfallwirtschaft Nürnberger Land"
|
||||
DESCRIPTION = "Source for Nürnberger Land"
|
||||
URL = "https://nuernberger-land.de"
|
||||
TEST_CASES = {
|
||||
"Schwarzenbruck, Mühlbergstraße": {"id": 16952001},
|
||||
"Burgthann, Brunhildstr": {"id": 14398001},
|
||||
"Kirchensittenbach, Erlenweg": {"id": 15192001},
|
||||
}
|
||||
|
||||
API_URL = "https://abfuhrkalender.nuernberger-land.de/waste_calendar"
|
||||
|
||||
ICON_MAP = {
|
||||
"Restmüll": "mdi:trash-can",
|
||||
"Biotonne": "mdi:leaf",
|
||||
"Gelber Sack": "mdi:recycle",
|
||||
"Papier": "mdi:package-variant",
|
||||
"Giftmobil": "mdi:biohazard",
|
||||
}
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(self, id):
|
||||
self._id = id
|
||||
self._ics = ICS(split_at="/")
|
||||
|
||||
def fetch(self):
|
||||
# fetch the ical
|
||||
r = requests.get(f"{API_URL}/ical?id={self._id}")
|
||||
r.raise_for_status()
|
||||
|
||||
# replace non-ascii character in UID, otherwise ICS converter will fail
|
||||
ics = ""
|
||||
for line in r.text.splitlines():
|
||||
if line.startswith("UID"):
|
||||
line = line.replace("ä", "ae")
|
||||
line = line.replace("ö", "oe")
|
||||
line = line.replace("ü", "ue")
|
||||
ics += line
|
||||
ics += "\n"
|
||||
|
||||
dates = self._ics.convert(ics)
|
||||
|
||||
entries = []
|
||||
|
||||
for d in dates:
|
||||
entries.append(Collection(date=d[0], t=d[1], icon=ICON_MAP.get(d[1])))
|
||||
|
||||
return entries
|
||||
@@ -9,7 +9,7 @@ from pprint import pprint
|
||||
|
||||
TITLE = "Oslo Kommune"
|
||||
DESCRIPTION = "Oslo Kommune (Norway)."
|
||||
URL = "https://www.oslo.kommune.no/avfall-og-gjenvinning/nar-hentes-avfallet/"
|
||||
URL = "https://www.oslo.kommune.no"
|
||||
|
||||
# **street_code:** \
|
||||
# **county_id:** \
|
||||
@@ -29,7 +29,12 @@ TEST_CASES = {
|
||||
}
|
||||
}
|
||||
|
||||
BASE_URL = "https://www.oslo.kommune.no/xmlhttprequest.php"
|
||||
API_URL = "https://www.oslo.kommune.no/xmlhttprequest.php"
|
||||
ICON_MAP = {
|
||||
"": "mdi:trash-can",
|
||||
"restavfall": "mdi:trash-can",
|
||||
"papir": "mdi:newspaper-variant-multiple"
|
||||
}
|
||||
|
||||
class Source:
|
||||
def __init__(self, street_name, house_number, house_letter, street_id):
|
||||
@@ -37,11 +42,6 @@ class Source:
|
||||
self._house_number = house_number
|
||||
self._house_letter = house_letter
|
||||
self._street_id = street_id
|
||||
self._icon_map = {
|
||||
"": "mdi:trash-can",
|
||||
"restavfall": "mdi:trash-can",
|
||||
"papir": "mdi:newspaper-variant-multiple"
|
||||
}
|
||||
|
||||
def fetch(self):
|
||||
headers = {
|
||||
@@ -56,7 +56,7 @@ class Source:
|
||||
'street_id': self._street_id,
|
||||
}
|
||||
|
||||
r = requests.get(BASE_URL, params = args, headers = headers)
|
||||
r = requests.get(API_URL, params = args, headers = headers)
|
||||
|
||||
entries = []
|
||||
res = json.loads(r.content)['data']['result'][0]['HentePunkts']
|
||||
@@ -70,7 +70,7 @@ class Source:
|
||||
tjeneste['TommeDato'], "%d.%m.%Y"
|
||||
).date(),
|
||||
t = tekst,
|
||||
icon = self._icon_map[tekst.lower()] or "mdi:trash-can"
|
||||
icon = ICON_MAP.get(tekst.lower(), "mdi:trash-can")
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -4,11 +4,11 @@ import datetime
|
||||
import requests
|
||||
from waste_collection_schedule import Collection
|
||||
|
||||
TITLE = "Peterborough.gov.uk"
|
||||
TITLE = "Peterborough City Council"
|
||||
DESCRIPTION = (
|
||||
"Source for peterborough.gov.uk services for Peterborough"
|
||||
)
|
||||
URL = "peterborough.gov.uk"
|
||||
URL = "https://peterborough.gov.uk"
|
||||
TEST_CASES = {
|
||||
"houseNumber": {"post_code": "PE57AX", "number": 1},
|
||||
"houseName": {"post_code": "PE57AX", "name": "CASTOR HOUSE"},
|
||||
@@ -20,7 +20,7 @@ API_URLS = {
|
||||
"collection": "https://www.peterborough.gov.uk/api/jobs/{start}/{end}/{uprn}",
|
||||
}
|
||||
|
||||
ICONS = {
|
||||
ICON_MAP = {
|
||||
"Empty Bin 240L Black": "mdi:trash-can",
|
||||
"Empty Bin 240L Green": "mdi:recycle",
|
||||
"Empty Bin 240L Brown": "mdi:leaf",
|
||||
@@ -74,7 +74,7 @@ class Source:
|
||||
collection["nextDate"], "%Y-%m-%dT%H:%M:%S"
|
||||
).date(),
|
||||
t=collection["jobDescription"],
|
||||
icon=ICONS.get(collection["jobDescription"]),
|
||||
icon=ICON_MAP.get(collection["jobDescription"]),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -5,10 +5,10 @@ from urllib.parse import quote
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
|
||||
TITLE = "PGH.ST"
|
||||
TITLE = "City of Pittsburgh"
|
||||
DESCRIPTION = "Source for PGH.ST services for the city of Pittsburgh, PA, USA."
|
||||
URL = "http://www.pgh.st"
|
||||
TEST_CASES = {}
|
||||
URL = "https://www.pgh.st"
|
||||
COUNTRY = "us"
|
||||
TEST_CASES = {
|
||||
"Pittsburgh, Negley": {
|
||||
"house_number": 800,
|
||||
|
||||
@@ -51,20 +51,17 @@ class Source:
|
||||
"Authorization": "",
|
||||
}
|
||||
r = requests.get(f"{url}/access-token", headers=headers)
|
||||
r.raise_for_status()
|
||||
headers["Authorization"] = r.json()["accessToken"]
|
||||
|
||||
params = {"q": self._postcode}
|
||||
r = requests.get(f"{url}/zipcodes", params=params, headers=headers)
|
||||
if r.status_code != 200:
|
||||
_LOGGER.error("Get zip code failed")
|
||||
return []
|
||||
r.raise_for_status()
|
||||
zipcodeId = r.json()["items"][0]["id"]
|
||||
|
||||
params = {"q": self._street, "zipcodes": zipcodeId}
|
||||
r = requests.post(f"{url}/streets", params=params, headers=headers)
|
||||
if r.status_code != 200:
|
||||
_LOGGER.error("Get street id failed")
|
||||
return []
|
||||
r.raise_for_status()
|
||||
|
||||
streetId = None
|
||||
for item in r.json()["items"]:
|
||||
@@ -85,9 +82,7 @@ class Source:
|
||||
# "size":100,
|
||||
}
|
||||
r = requests.get(f"{url}/collections", params=params, headers=headers)
|
||||
if r.status_code != 200:
|
||||
_LOGGER.error("Get data failed")
|
||||
return []
|
||||
r.raise_for_status()
|
||||
|
||||
entries = []
|
||||
for item in r.json()["items"]:
|
||||
|
||||
@@ -7,6 +7,7 @@ from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
TITLE = "RecycleSmart"
|
||||
DESCRIPTION = "Source for RecycleSmart collection."
|
||||
URL = "https://www.recyclesmart.com/"
|
||||
COUNTRY = "au"
|
||||
TEST_CASES = {
|
||||
"pickup": {
|
||||
"email": "!secret recyclesmart_email",
|
||||
|
||||
@@ -2,15 +2,14 @@ import requests
|
||||
from waste_collection_schedule import Collection
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "RegioEntsorgung"
|
||||
TITLE = "RegioEntsorgung Städteregion Aachen"
|
||||
DESCRIPTION = "RegioEntsorgung Städteregion Aachen"
|
||||
URL = "https://regioentsorgung.de/service/abfallkalender/"
|
||||
|
||||
URL = "https://regioentsorgung.de"
|
||||
TEST_CASES = {
|
||||
"Merzbrück": {"city": "Würselen", "street": "Merzbrück", "house_number": 200 },
|
||||
}
|
||||
|
||||
BASE_URL = "https://tonnen.regioentsorgung.de/WasteManagementRegioentsorgung/WasteManagementServlet"
|
||||
API_URL = "https://tonnen.regioentsorgung.de/WasteManagementRegioentsorgung/WasteManagementServlet"
|
||||
|
||||
HEADERS = {
|
||||
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64)",
|
||||
@@ -31,7 +30,7 @@ class Source:
|
||||
payload = {
|
||||
'SubmitAction': 'wasteDisposalServices',
|
||||
}
|
||||
r = session.get(BASE_URL, headers=HEADERS, params=payload)
|
||||
r = session.get(API_URL, headers=HEADERS, params=payload)
|
||||
r.raise_for_status()
|
||||
|
||||
payload = {
|
||||
@@ -41,7 +40,7 @@ class Source:
|
||||
'Strasse': '',
|
||||
'Hausnummer': '',
|
||||
}
|
||||
r = session.post(BASE_URL, headers=HEADERS, data=payload)
|
||||
r = session.post(API_URL, headers=HEADERS, data=payload)
|
||||
r.raise_for_status()
|
||||
|
||||
payload = {
|
||||
@@ -51,7 +50,7 @@ class Source:
|
||||
'Strasse': self.street,
|
||||
'Hausnummer': '',
|
||||
}
|
||||
r = session.post(BASE_URL, headers=HEADERS, data=payload)
|
||||
r = session.post(API_URL, headers=HEADERS, data=payload)
|
||||
r.raise_for_status()
|
||||
|
||||
payload = {
|
||||
@@ -61,14 +60,14 @@ class Source:
|
||||
'Strasse': self.street,
|
||||
'Hausnummer': self.house_number,
|
||||
}
|
||||
r = session.post(BASE_URL, headers=HEADERS, data=payload)
|
||||
r = session.post(API_URL, headers=HEADERS, data=payload)
|
||||
r.raise_for_status()
|
||||
|
||||
payload = {
|
||||
'ApplicationName': 'com.athos.kd.regioentsorgung.AbfuhrTerminModel',
|
||||
'SubmitAction': 'forward',
|
||||
}
|
||||
r = session.post(BASE_URL, headers=HEADERS, data=payload)
|
||||
r = session.post(API_URL, headers=HEADERS, data=payload)
|
||||
r.raise_for_status()
|
||||
|
||||
payload = {
|
||||
@@ -86,7 +85,7 @@ class Source:
|
||||
'ICalZeit': '06:00 Uhr',
|
||||
'SubmitAction': 'filedownload_ICAL',
|
||||
}
|
||||
r = session.post(BASE_URL, headers=HEADERS, data=payload)
|
||||
r = session.post(API_URL, headers=HEADERS, data=payload)
|
||||
r.raise_for_status()
|
||||
|
||||
# Parse ics file
|
||||
|
||||
@@ -7,6 +7,7 @@ from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
TITLE = "Republic Services"
|
||||
DESCRIPTION = "Source for Republic Services Collection."
|
||||
URL = "https://www.republicservices.com"
|
||||
COUNTRY = "us"
|
||||
TEST_CASES = {
|
||||
"Scott Country Clerk": {"street_address": "101 E Main St, Georgetown, KY 40324"},
|
||||
"Branch County Clerk": {"street_address": "31 Division St. Coldwater, MI 49036"}
|
||||
|
||||
@@ -4,7 +4,7 @@ import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "RH Entsorgung"
|
||||
TITLE = "Rhein-Hunsrück Entsorgung (RHE)"
|
||||
DESCRIPTION = "Source for RHE (Rhein Hunsrück Entsorgung)."
|
||||
URL = "https://www.rh-entsorgung.de"
|
||||
TEST_CASES = {
|
||||
|
||||
@@ -1,19 +1,24 @@
|
||||
from datetime import datetime
|
||||
|
||||
import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
import json
|
||||
|
||||
TITLE = "Richmondshire (North Yorkshire)"
|
||||
TITLE = "Richmondshire District Council"
|
||||
DESCRIPTION = "To find your UPRN, visit the Richmondshire page and use the address search. Right-click your entry in the house dropdown, choose Inspect, and copy the UPRN from the value"
|
||||
URL = "https://www.richmondshire.gov.uk/collectionCalendar"
|
||||
TEST_CASES = {"test 1": {"uprn": 200001767082}, "test 2": {"uprn": 200001767078}, "test3": {"uprn": 200001767079 }}
|
||||
URL = "https://www.richmondshire.gov.uk"
|
||||
TEST_CASES = {
|
||||
"test1": {"uprn": 200001767082},
|
||||
"test2": {"uprn": 200001767078},
|
||||
"test3": {"uprn": 200001767079},
|
||||
}
|
||||
|
||||
ICONS = {
|
||||
ICON_MAP = {
|
||||
"240L GREY RUBBISH BIN": "mdi:trash-can",
|
||||
"55L RECYCLING BOX": "mdi:recycle",
|
||||
"140L GARDEN BIN": "mdi:leaf",
|
||||
}
|
||||
|
||||
|
||||
class Source:
|
||||
def __init__(
|
||||
self, uprn
|
||||
@@ -24,9 +29,7 @@ class Source:
|
||||
r = requests.get(
|
||||
f"https://www.richmondshire.gov.uk/Umbraco/Api/MyAreaApi/GetBinRoundData?uprn={self._uprn}"
|
||||
)
|
||||
# print(r.text)
|
||||
ids = r.json()
|
||||
# print (len(ids))
|
||||
|
||||
entries = []
|
||||
|
||||
@@ -35,7 +38,7 @@ class Source:
|
||||
Collection(
|
||||
date=datetime.strptime(id["start"], "%Y-%m-%dT%H:%M:%S").date(),
|
||||
t=id["title"],
|
||||
icon=ICONS.get(id["title"]),
|
||||
icon=ICON_MAP.get(id["title"]),
|
||||
)
|
||||
)
|
||||
return entries
|
||||
|
||||
@@ -2,9 +2,8 @@ import requests
|
||||
from waste_collection_schedule import Collection # type: ignore[attr-defined]
|
||||
from waste_collection_schedule.service.ICS import ICS
|
||||
|
||||
TITLE = "rushmoor.gov.uk"
|
||||
TITLE = "Rushmoor Borough Council"
|
||||
DESCRIPTION = "Source for rushmoor.gov.uk services for Rushmoor, UK."
|
||||
# Find the UPRN of your address using https://www.findmyaddress.co.uk/search
|
||||
URL = "https://rushmoor.gov.uk"
|
||||
TEST_CASES = {
|
||||
"GU14": {"uprn": "100060551749"},
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user