More typings

Dmitriy Shishkov 2023-09-19 06:12:29 +03:00
parent 118f007bfe
commit 600daa5498
Signed by: dm1sh
GPG Key ID: 027994B0AA357688
4 changed files with 19 additions and 17 deletions

View File

@@ -5,7 +5,7 @@ import time
 from . import RossetiParser, split_addresses, fetch_builing_ids, preprocess_df
 
-def job():
+def job() -> None:
     parser = RossetiParser()
     print(parser)
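
The `-> None` annotations throughout this commit matter for more than documentation: assuming mypy (or a similar checker) is run on this project, bodies of completely unannotated functions are skipped by default, so adding the return type is what actually puts `job()` under checking. A minimal standalone sketch of that behaviour (not repository code):

def untyped():
    x: int = "oops"    # not reported: unannotated defs are skipped unless --check-untyped-defs

def typed() -> None:
    x: int = "oops"    # mypy: Incompatible types in assignment (expression has type "str", variable has type "int")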

View File

@@ -1,4 +1,5 @@
-from typing import List, TypeVar
+from __future__ import annotations
+from typing import List, Iterable, TypeVar, Any
 import pandas as pd
 import re
@@ -34,11 +35,11 @@ def unfold_houses_list(token: str) -> List[str]:
     return [token]
 
-def any_of_in(substrings: List[str], string) -> bool:
+def any_of_in(substrings: Iterable[str], string: str) -> bool:
     return any(map(lambda substring: substring in string, substrings))
 
-def flatten(arr: List[List[T]]) -> List[T]:
+def flatten(arr: Iterable[List[T]]) -> List[T]:
     return sum(arr, [])
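
`Iterable` is the more honest contract for these two helpers, since both only loop over their argument: `any(map(...))` and `sum(arr, [])` accept any iterable, so tuples and generators now type-check as inputs, not just lists. Hypothetical calls illustrating the widened signatures (`addresses` is an assumed variable, not from the diff):

nested = flatten([["1", "2"], ["3"]])                    # still fine: a list of lists
nested = flatten(split_address(a) for a in addresses)    # a generator of lists is now accepted too
found = any_of_in(("ул.", "пр."), "пр. Невский, д. 1")   # a tuple of substrings works as well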
@@ -69,7 +70,7 @@ def split_address(address: str) -> List[str]:
     return [address]
 
-def process_row(row):
+def process_row(row: pd.Series[str]) -> pd.Series[str]:
     if pd.isnull(row['Улица']):
         return row
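
The `pd.Series[str]` annotation on `process_row` is the reason `from __future__ import annotations` was added above: with postponed evaluation (PEP 563) the annotation is stored as a string and never evaluated, so it does not require a pandas version where `Series` is subscriptable at runtime, while a checker with pandas stubs still sees the element type. A standalone sketch of the same pattern (the helper name and the `.strip()` step are illustrative; only the 'Улица' column comes from the diff):

from __future__ import annotations   # keeps pd.Series[str] out of runtime evaluation

import pandas as pd

def normalize_street(row: pd.Series[str]) -> pd.Series[str]:
    # Hypothetical row-wise helper in the spirit of process_row.
    if pd.isnull(row['Улица']):
        return row
    row['Улица'] = row['Улица'].strip()
    return row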

View File

@@ -1,10 +1,11 @@
-from typing import Optional, Tuple
+from __future__ import annotations
+from typing import Optional, Tuple, Any
 import requests
 import pandas as pd
 
-def get_building_id(row) -> Optional[Tuple[int, float, float]]:
+def get_building_id(row: pd.Series[Any]) -> Optional[Tuple[int, float, float]]:
     r = requests.get('https://geocode.gate.petersburg.ru/parse/eas', params={
         'street': row['Улица']
     })
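
`Optional[Tuple[int, float, float]]` (presumably a building id plus two coordinates) forces every caller to handle a failed lookup, and `pd.Series[Any]` documents that the argument is a DataFrame row. A hedged usage sketch; the call site and the unpacking names are assumptions, not part of this diff:

result = get_building_id(df.iloc[0])      # df is assumed to hold the parsed Rosseti table
if result is None:
    print("geocoder returned no match")   # the Optional return makes this branch explicit
else:
    building_id, lat, lon = result        # assumed meaning of the three tuple fields
    print(building_id, lat, lon)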

View File

@@ -8,7 +8,7 @@ import pandas as pd
 class RossetiParser:
-    def __init__(self, ndays=7, today: Optional[datetime] = None, file_path: Optional[str] = None):
+    def __init__(self, ndays=7, today: Optional[datetime] = None, file_path: Optional[str] = None) -> None:
         self.base_url = "https://rosseti-lenenergo.ru/planned_work"
         if today is None:
@@ -23,7 +23,7 @@ class RossetiParser:
         else:
             self.load_df(file_path)
 
-    def __str__(self):
+    def __str__(self) -> str:
         return f"From {self.today.date()} for {self.ndays} days with {len(self.df)} records"
 
     @staticmethod
@@ -39,7 +39,7 @@ class RossetiParser:
             'date_finish': date_finish
         }
 
-    def __get_page(self, url: str, params: Mapping[str, str]):
+    def __get_page(self, url: str, params: Mapping[str, str]) -> None:
         r = requests.get(url, params)
         self.soup = BeautifulSoup(r.text, features='html.parser')
@@ -58,14 +58,14 @@ class RossetiParser:
         return pd.read_html(io.StringIO(str(table)))[0]
 
-    def __save_page(self, uri: str):
+    def __save_page(self, uri: str) -> None:
         print(f'Processing page "{uri}"')
         self.__get_page(self.base_url + uri, self.__params)
         self.df = pd.concat(
             (self.df, self.__parse_table()), ignore_index=True)
 
-    def __set_columns(self):
-        self.df.columns = (
+    def __set_columns(self) -> None:
+        self.df.columns = pd.Index((
             "Регион РФ (область, край, город фед. значения, округ)",
             "Административный район",
             "Населённый пункт",
@@ -77,9 +77,9 @@ class RossetiParser:
             "Филиал",
             "РЭС",
             "Комментарий",
-        )
+        ))
 
-    def fetch(self, ndays: Optional[int] = None, today: Optional[datetime] = None):
+    def fetch(self, ndays: Optional[int] = None, today: Optional[datetime] = None) -> None:
         if ndays is None:
             ndays = self.ndays
         if today is None:
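
The `pd.Index((...))` wrapper does not change behaviour; pandas coerces any same-length sequence assigned to `df.columns` into an Index anyway. It is there for the checker, since the pandas stubs type the `columns` setter as an Index rather than a bare tuple. A minimal equivalent sketch on a toy frame:

import pandas as pd

df = pd.DataFrame([[1, 2], [3, 4]])
df.columns = ("a", "b")              # identical result at runtime, but stubs may flag the tuple
df.columns = pd.Index(("a", "b"))    # same result, and it satisfies the type checker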
@@ -100,10 +100,10 @@
         self.__set_columns()
 
-    def save_df(self, file_path: str):
+    def save_df(self, file_path: str) -> None:
         print(f'Saved as "{file_path}"')
         self.df.to_csv(file_path, index=False)
 
-    def load_df(self, file_path: str):
+    def load_df(self, file_path: str) -> None:
         print(f'Read from "{file_path}"')
         self.df = pd.read_csv(file_path)
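
With explicit return types on `fetch`, `save_df` and `load_df`, a typical round trip now reads cleanly under the checker. A usage sketch built only from methods shown in this commit; judging by the constructor above, omitting `file_path` appears to trigger a fresh fetch while passing it loads a saved CSV (the file name is an arbitrary example):

parser = RossetiParser()                            # assumed: fetches the planned-work table
print(parser)                                       # "From <date> for 7 days with <N> records"
parser.save_df("rosseti.csv")

restored = RossetiParser(file_path="rosseti.csv")   # assumed: loads the saved table instead
print(restored)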