Compare commits: 259c71b17b...main

13 Commits:

- d653810dcb
- c735ca2ee5
- 2efb4d4846
- 3bd1deb8db
- 430f36619a
- 367abfc325
- 68b92b8bd2
- 931ff1270b
- cc2029802b
- 06f08d4933
- e6af86703e
- 662c2541db
- 5722fc86fb

@@ -1,5 +1,9 @@
REFETCH_PERIOD_H=6
STORE_NULL_BID=True

POSTGRES_USER=lenenergo
POSTGRES_PASSWORD=lenenergo
POSTGRES_DB=lenenergo
POSTGRES_HOST=db
# or
DB_URL=postgresql://lenenergo:lenenergo@localhost:5432

.gitignore (vendored, 2 changes)
@@ -5,3 +5,5 @@ __pycache__
data*.csv
.idea/
.ipynb_checkpoints
.vscode/
*.odb

.vscode/launch.json (vendored, 15 changes)
@@ -1,15 +0,0 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Python: Module",
            "type": "python",
            "request": "launch",
            "module": "parser",
            "justMyCode": true,
        }
    ]
}

README.md (new file, 37 lines)
@@ -0,0 +1,37 @@
# Lenenergo Parser

## DB columns settings

Append to `runner/config.py`

```python
DB_COLUMNS_MAP["<COL_NS key>"] = "<corresponding db column name>"
```

## Running instructions

```bash
docker build . -it lenenergo_parser
docker run -d \
  [-e REFETCH_PERIOD_H=4] \ # Refetch period
  [-e STORE_NULL_BID=False] \ # Store rows with null building_id
  # DB auth variants
  [-e POSTGRES_USER=lenenergo] \
  [-e POSTGRES_PASSWORD=lenenergo] \
  [-e POSTGRES_DB=lenenergo] \
  [-e POSTGRES_HOST=localhost] \
  [-e POSTGRES_PORT=5432] \
  # or
  [DB_URL=postgresql://lenenergo:lenenergo@localhost:5432/lenenergo] \
  lenenergo_parser
```

## Dev instructions

```bash
python -m venv .venv

pip install -r requirements.txt

python -m runner
```
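
The "DB columns settings" section of the new README refers to the `DB_COLUMNS_MAP` dictionary introduced in `runner/config.py` later in this compare. A minimal sketch of the override it describes; the database column names used here are purely illustrative, not something defined in this repository:

```python
# Appended to runner/config.py after the defaults.
# "street_name" and "building" are hypothetical DB column names.
DB_COLUMNS_MAP["street"] = "street_name"
DB_COLUMNS_MAP["building_id"] = "building"
```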

@@ -31,12 +31,18 @@ def split_addresses(df: pd.DataFrame) -> pd.DataFrame
```
- `get_building_id`:
```python
def get_building_id(street: str) -> Tuple[Optional[int], Optional[float], Optional[float]]
def get_building_id(street: str) -> GeoTupleType
```
- `fetch_builing_ids`:
```python
def fetch_builing_ids(df: pd.DataFrame) -> pd.DataFrame
```
- `async_fetch_building_id`:
```python
async def async_fetch_building_id(
    session: aiohttp.ClientSession, street: str
) -> GeoTupleType
```
- `async_fetch_building_ids`:
```python
async def async_fetch_building_ids(df: pd.DataFrame) -> pd.DataFrame
@@ -51,11 +57,15 @@ def preprocess_df(df: pd.DataFrame) -> pd.DataFrame
```
- `COL_NS`:
```python
COL_NS: Dict[str, str]
COL_NS: dict[str, str]
```
- `ICOL_NS`:
```python
ICOL_NS: Dict[str, str]
ICOL_NS: dict[str, str]
```
- `PR_COL_NS`:
```python
PR_COL_NS: tuple[str]
```
- `preprocess_read_df`:
```python
@@ -82,4 +92,4 @@ pip install -r requirements.txt
python -m parser [<period in hours>]
```

Format of the saved files: `data_%d-%m-%y_%H:%M.csv`
Format of the saved files: `data_%d-%m-%y_%H.%M.csv`
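
The documentation change above replaces the explicit tuple annotation with a `GeoTupleType` alias. Its definition is not shown in this compare; judging only from the signature it replaces, it is presumably an alias along these lines:

```python
# Hypothetical reconstruction of the alias. The real definition lives in
# parser/building_id.py, which this compare does not include.
from typing import Optional, Tuple

GeoTupleType = Tuple[Optional[int], Optional[float], Optional[float]]  # presumably (building_id, lat, lng)
```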

@@ -1,5 +1,6 @@
from .address import split_addresses
from .building_id import (
    GeoTupleType,
    async_fetch_building_id,
    async_fetch_building_ids,
    concurrent_fetch_builing_ids,
@@ -7,24 +8,27 @@ from .building_id import (
    get_building_id,
)
from .lenenergo import LenenergoParser
from .pipeline import pipeline
from .preprocess import (
    COL_NS,
    ICOL_NS,
    PR_COL_NS,
    group_by_index,
    preprocess_df,
    preprocess_read_df,
)
from .util import pipeline

__all__ = (
    "COL_NS",
    "GeoTupleType",
    "ICOL_NS",
    "PR_COL_NS",
    "async_fetch_building_id",
    "async_fetch_building_ids",
    "COL_NS",
    "concurrent_fetch_builing_ids",
    "fetch_builing_ids",
    "get_building_id",
    "group_by_index",
    "ICOL_NS",
    "LenenergoParser",
    "pipeline",
    "preprocess_df",

@@ -3,11 +3,11 @@ import time

import schedule

from . import pipeline
from . import LenenergoParser, pipeline


def job():
    parser = pipeline()
    parser = pipeline(LenenergoParser(file_path="./data.csv"))
    parser.save_df(f'./data_{parser.today.strftime("%d-%m-%y_%H.%M")}.csv')
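
Besides passing an explicit `LenenergoParser`, the change above also switches the file-name template from `%H:%M` to `%H.%M`, so the timestamp no longer contains a colon. A quick check of what the new pattern produces:

```python
from datetime import datetime

# e.g. 5 March 2023, 14:30 -> "data_05-03-23_14.30.csv"
print(f'data_{datetime(2023, 3, 5, 14, 30).strftime("%d-%m-%y_%H.%M")}.csv')
```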

@@ -1,295 +0,0 @@
from __future__ import annotations

import re
from typing import Iterable, List, TypeVar

import pandas as pd

T = TypeVar("T")

CLASSES = ("w", "d", "c", "t", "s", "h", "b","e", "l", "r")
DISTRICTS_PREFIXES = ("мо ", "р-н","городское","лесхоз")
COUNTRYSIDE_PREFIXES = (
    "г", "п", "д", "гп", "рп", "кп", "пгт", "c", "хутор", " урочище")
TERRITORY_PREFIXES = (
    "тер.", " тер", "снт ", "ст ", "дск ", "днп ", "дпк ", "нп ", "пдк ", "т/б ", "садоводство", "массив", "хозя", "сад-во")
STREET_PREFIXES = (
    " ул", " бул", " пр", " ш", " пер", " дор", " маг", " наб", " пл", " просп", " туп", "шоссе", "лини", "аллея",
    "мост", " парк", "кольцо", "проезд", "съезд","переулок",
    "ул.", "бул.", "пр.", "ш.", "пер.", "дор.", "маг.", "наб.", "пл.", "просп.", "туп.")
HOUSES_PREFIXES = ("д.", "уч.", "участок", "мкд", "тп","дом","дома")
BUILDING_PREFIXES = ("к.", "к ","корп", "корпус")
EDIFICE_PREFIXES=("стр.", "строение")
LETTER = ("лит.", "литера", " л.")
PREFIXES = (DISTRICTS_PREFIXES, COUNTRYSIDE_PREFIXES, TERRITORY_PREFIXES, STREET_PREFIXES, HOUSES_PREFIXES, BUILDING_PREFIXES, EDIFICE_PREFIXES,LETTER)


def unfold_house_ranges(token: str) -> List[str]:
    addresses = []
    pairs_strings = re.findall(r"([\d]+-[\d]+)", token)
    for pair_string in pairs_strings:
        a, b = pair_string.split("-")
        a, b = int(a), int(b)

        if b > a:
            addresses += [re.sub(r"([\d]+-[\d]+)", number, token) for number in map(str, range(a, b + 1))]
        else:
            token = token.replace("-", "/")
    if not addresses:
        addresses.append(token)
    return addresses

def any_of_in(substrings: Iterable[str], string: str) -> bool:
    return any(map(lambda substring: substring in string, substrings))


def flatten(arr: Iterable[List[T]]) -> List[T]:
    return sum(arr, [])


def find_room(token: pd.Series, pre_token: pd.Series) -> str:
    if re.search(r"\bпом\.?", token['obj']):
        return "r"
    return ""


def find_litera(token: pd.Series, pre_token: pd.Series) -> str:
    if find_room(token, pre_token):
        return ""
    if any_of_in(LETTER, token['obj'].lower()) \
            or re.search(r"\d{1,3}([А-Я]|[а-я])( |$)", token['obj']):
        return "l"
    if (re.search(r"\b([А-Я]|[а-я]){1}$", token['obj']) \
            and ("l" in pre_token['class'] or "h" in pre_token['class'])) \
            and not (" ш" in token["obj"]) \
            and not find_countryside(token, pre_token):
        return "l"
    return ""
def find_edifice(token: pd.Series, pre_token: pd.Series) -> str:
    if any_of_in(EDIFICE_PREFIXES, token['obj'].lower()):
        return "e"
    return ""

def find_building(token: pd.Series, pre_token: pd.Series) -> str:
    if re.search(r"\d", token['obj']) and not find_room(token,pre_token):
        if any_of_in(BUILDING_PREFIXES, token['obj'].lower()) \
                or "b" in pre_token['class'] and not ("h" in token['class']) and not find_edifice(token,pre_token)\
                or re.search(r"к\.* ?\d", token['obj']):
            return "b"
    return ""


def find_house(token: pd.Series, pre_token: pd.Series) -> str:
    if re.search(r"\d{1,4}", token['obj']) and not find_room(token,pre_token):
        if any_of_in(HOUSES_PREFIXES, token['obj'].lower()):
            return "h"
        if re.search(r"(д|д\.) ?\d{1,4} ?\/*\d* ?", token['obj']):
            return "h"
        if ("s" in pre_token['class'] or "h" in pre_token['class'] or "s" in token['class']) \
                and not any_of_in(("-я", "-й", "-Я"), token['obj']) \
                and not find_building(token, pre_token)\
                and not find_edifice(token,pre_token):
            return "h"
        if find_building(token, pre_token) \
                and not any_of_in(("-я", "-й", "-Я"), token['obj']) \
                and True:
            if len(re.findall(r"\d{1,4}", token['obj'])) > 1:
                return "h"
            if int(re.search(r"\d{1,4}", token['obj']).group()) // 10 >0:
                return "h"
    return ""


def find_street(token: pd.Series, pre_token: pd.Series) -> str:
    if any_of_in(STREET_PREFIXES, token['obj'].lower()):
        return "s"
    if re.search(r"\b[А-Яа-я]{4,}\b", token['obj']) \
            and not any([el in token["obj"].lower() for pr in PREFIXES for el in pr if len(el)>2]) \
            and not ("d" in token["class"] or "t" in token["class"] or "c" in token["class"]):
        return "s"
    return ""


def find_territory(token: pd.Series, pre_token: pd.Series) -> str:
    if any_of_in(TERRITORY_PREFIXES, token['obj'].lower()):
        return "t"
    return ""


def find_countryside(token: pd.Series, pre_token: pd.Series) -> str:
    if any_of_in(COUNTRYSIDE_PREFIXES, token['obj'].lower()) \
            and re.search(r"\b[гпдрпктc]{1,3}(\b|\. )", token['obj']) \
            and not find_house(token, pre_token) \
            and not any_of_in(STREET_PREFIXES, token['obj'].lower()):
        return "c"
    return ""


def find_district(token: pd.Series, pre_token: pd.Series) -> str:
    if any_of_in(DISTRICTS_PREFIXES, token['obj'].lower()):
        return "d"
    return ""

def address_classification(token: pd.Series, pre_token: pd.Series) -> pd.Series:
    brackets = re.search(r"\(.+\)", token["obj"])
    if brackets:
        token["obj"] = re.sub(r"\(.+\)", "()", token["obj"])
    token["class"] += find_district(token, pre_token)
    token["class"] += find_countryside(token, pre_token)
    token["class"] += find_territory(token, pre_token)
    token["class"] += find_street(token, pre_token)
    token["class"] += find_house(token, pre_token)
    token["class"] += find_building(token, pre_token)
    token["class"] += find_edifice(token, pre_token)
    token["class"] += find_litera(token, pre_token)
    token["class"] += find_room(token, pre_token)
    if token['class'] == "":
        token['class'] = "w"
    if brackets:
        token["obj"] = re.sub(r"\(\)", brackets.group(), token["obj"])
    return token

def cut_address(ad: pd.Series, cl: str) -> pd.Series:
    while ad["class"] and CLASSES.index(ad["class"][-1]) > CLASSES.index(cl[0]):
        if ad["class"][-1] == "h":
            ad["address"] = re.sub(r"[мкдтпучасток]*\.? ?\d{1,4} ?\/*\d* ?", "",
                                   ad["address"].lower())
        elif ad["class"][-1] == "b":
            num = re.findall("к{0,1}\.? ?\d", ad["address"])[-1]
            ad["address"] = re.sub(num, "", ad["address"])
        elif ad["class"][-1] == "e":
            ad["address"] = re.sub(r"cтр\.? ?\d", "", ad["address"])
        elif ad["class"][-1] == "l":
            ad["address"] = re.sub(r"[литера]*\.? ?[А-Яа-я]{1}$", "", ad["address"])
        elif ad["class"][-1] == "r":
            ad["address"] = re.sub(r"пом\.? ?\d+", "", ad["address"])
        ad["class"] = ad["class"][:-1]
    return ad


# TODO: rework this chain of ifs into a proper structure
def split_address(address: str) -> List[str]:
    if ";" in address:
        address = address.replace(";", ",")
    if "," in address:
        tokens = address.split(",")

        t = list(map(str.strip, filter(lambda token: token != "", tokens)))

        tokens = pd.DataFrame()
        tokens['obj'] = t
        for el in ("", "уг.", "д."):
            tokens = tokens[tokens["obj"] != el]
        tokens.insert(len(tokens.columns), "class", "")
        res = []
        accumulator = pd.Series(data={"address": "", "class": ""})

        for i in range(len(tokens)):
            cur_tk = tokens.iloc[i]

            if i == 0:
                pre_token = pd.Series(data=["", ""], index=['obj', 'class'])
            else:
                pre_token = tokens.iloc[i - 1]

            cur_tk = address_classification(cur_tk, pre_token)
            tokens.iloc[i] = cur_tk
            print(tokens.iloc[i])

            if not accumulator["class"]:
                accumulator["class"] = cur_tk['class']
                accumulator["address"] = cur_tk["obj"]
                continue

            if CLASSES.index(accumulator["class"][-1]) < CLASSES.index(cur_tk["class"][0]) and accumulator["class"]!="w":
                accumulator["class"] += cur_tk['class']
                accumulator["address"] += " " + cur_tk["obj"]
            else:
                ad_no_ranges = unfold_house_ranges(accumulator["address"])
                accumulator["address"] = ad_no_ranges[-1]

                res.extend(ad_no_ranges)

                accumulator = cut_address(accumulator, cur_tk["class"])

                if not accumulator["class"] or CLASSES.index(cur_tk["class"][0]) <= CLASSES.index("s") or accumulator["class"]=="w":
                    accumulator["class"] = cur_tk["class"]
                    accumulator["address"] = cur_tk["obj"]

                if cur_tk["class"][0] == "h":
                    num = re.findall("\d{1,4} ?[\/\-]?\d* ?", cur_tk['obj'])[0]
                    if any_of_in(("-я", "-й", "-Я"), accumulator["address"]):
                        idx = 1
                    else:
                        idx = 0
                    num_ac = re.findall("\d{1,4} ?[\/\-]?\d* ?", accumulator["address"])
                    if num_ac:
                        accumulator["address"] = re.sub(num_ac[idx], num, accumulator["address"])
                    cur_tk["class"] =cur_tk["class"][1:]

                if cur_tk["class"] and cur_tk["class"][0] == "b":
                    num = re.findall("\d", cur_tk["obj"])[-1]
                    if num and not "b" in accumulator["class"]:
                        accumulator["class"] += "b"
                        accumulator["address"] += "к." + num
                    else:
                        accumulator["address"] = re.sub(r"\d$", num, accumulator["address"])
                    cur_tk["class"] = cur_tk["class"][1:]

                if cur_tk["class"] and cur_tk["class"][0] == "e":
                    num = re.findall("стр\.? ?\d", cur_tk["obj"].strip())[-1]
                    accumulator["address"] = re.sub(r"cтр\. ?\d", num, accumulator["address"].strip())
                    if num and not "e" in accumulator["class"]:
                        accumulator["class"] += "e"
                    cur_tk["class"] = cur_tk["class"][1:]

                if cur_tk["class"] and cur_tk["class"][0] == "l":
                    num = re.findall("[А-Яа-я]", cur_tk["obj"].strip())[-1]
                    accumulator["address"] = re.sub(r"[А-Яа-я]$", "", accumulator["address"].strip())
                    accumulator["address"] += num
                    if num and not "l" in accumulator["class"]:
                        accumulator["class"] += "l"
                else:
                    if re.search(r"\d{1,3}([А-Я]|[а-я])( |$)", accumulator["address"]):
                        accumulator["address"] = re.sub(r"[А-Яа-я]$", "", accumulator["address"].strip())
                if cur_tk["class"] and cur_tk["class"][0] == "r":
                    num = re.findall("пом\. ?\-?\d*\w?", cur_tk["obj"].strip())[-1]
                    accumulator["address"] = re.sub(r"пом\. ?\d\-?\d*\w?", num, accumulator["address"].strip())
                    if num and not "r" in accumulator["class"]:
                        accumulator["class"] += "r"
                    cur_tk["class"] = cur_tk["class"][1:]
        res.extend(unfold_house_ranges(accumulator["address"]))
        print(res)
        return res

    return [address]

def split_pesoch_res(address: str) -> List[str]:
    t = re.sub(r",", " ", address)
    t = re.split(r"(Санкт-Петербург|Ленинградская обл|Л\.О)", t)
    t = list(map(str.strip, filter(lambda token: token != "", t)))
    tokens = [t[i] + " " + t[i+1] for i in range(0, len(t)-1, 2)]

    if tokens:
        return list(set(tokens))
    return [address]

def process_row(row: pd.Series[str]) -> pd.Series[str]:
    row = row.copy()

    if pd.isnull(row["Улица"]):
        row["Улица"] = [None]
    else:
        if row["РЭС"] == "Песочинский РЭС":
            addresses = split_pesoch_res(row["Улица"])
        else:
            addresses = split_address(row["Улица"])
        row["Улица"] = addresses

    return row


def split_addresses(df: pd.DataFrame) -> pd.DataFrame:
    merged_df = df.apply(process_row, axis=1).reset_index()

    return merged_df.explode("Улица", ignore_index=True)
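
The deleted module above and its replacement in `parser/address/` below expose the same `split_addresses(df)` entry point: one outage record per row, the address text in the `Улица` column and the servicing district in `РЭС`. A minimal usage sketch with an invented sample row, showing what the splitting does to a house range:

```python
import pandas as pd

from parser.address import split_addresses  # new location introduced later in this compare

# Invented sample row; real data comes from the Lenenergo outage table.
df = pd.DataFrame({"РЭС": ["Пригородный РЭС"], "Улица": ["пр. Ленина, д. 2-4"]})

# process_row() splits the address, unfold_house_ranges() expands "2-4",
# and explode() turns the resulting list into separate rows.
print(split_addresses(df)["Улица"].tolist())
# roughly: ["пр. Ленина д. 2", "пр. Ленина д. 3", "пр. Ленина д. 4"]
```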

parser/address/__init__.py (new file, 12 lines)
@@ -0,0 +1,12 @@
from .classifier import CLASSES, address_classification
from .splitter import AddressSplitter, split_addresses, split_pesoch_res
from .utils import create_token

__all__ = (
    "address_classification",
    "AddressSplitter",
    "CLASSES",
    "create_token",
    "split_addresses",
    "split_pesoch_res",
)

parser/address/classifier.py (new file, 222 lines)
@@ -0,0 +1,222 @@
import re

import pandas as pd

from .utils import any_of_in

CLASSES = ("w", "d", "c", "t", "s", "h", "b", "e", "l", "r")
DISTRICTS_PREFIXES = ("мо ", "р-н", "городское", "лесхоз")
COUNTRYSIDE_PREFIXES = (
    "г",
    "п",
    "д",
    "гп",
    "рп",
    "кп",
    "пгт",
    "c",
    "хутор",
    " урочище",
)
TERRITORY_PREFIXES = (
    "тер.",
    " тер",
    "снт ",
    "ст ",
    "дск ",
    "днп ",
    "дпк ",
    "нп ",
    "пдк ",
    "т/б ",
    "садоводство",
    "массив",
    "хозя",
    "сад-во",
)
STREET_PREFIXES = (
    " ул",
    " бул",
    " пр",
    " ш",
    " пер",
    " дор",
    " маг",
    " наб",
    " пл",
    " просп",
    " туп",
    "шоссе",
    "лини",
    "аллея",
    "мост",
    " парк",
    "кольцо",
    "проезд",
    "съезд",
    "переулок",
    "ул.",
    "бул.",
    "пр.",
    "ш.",
    "пер.",
    "дор.",
    "маг.",
    "наб.",
    "пл.",
    "просп.",
    "туп.",
)
HOUSES_PREFIXES = ("д.", "уч.", "участок", "мкд", "тп", "дом", "дома")
BUILDING_PREFIXES = ("к.", "к ", "корп", "корпус")
EDIFICE_PREFIXES = ("стр.", "строение")
LETTER = ("лит.", "литера", " л.")
PREFIXES = (
    DISTRICTS_PREFIXES,
    COUNTRYSIDE_PREFIXES,
    TERRITORY_PREFIXES,
    STREET_PREFIXES,
    HOUSES_PREFIXES,
    BUILDING_PREFIXES,
    EDIFICE_PREFIXES,
    LETTER,
)


def find_room(token: pd.Series, pre_token: pd.Series) -> str:
    if re.search(r"\bпом\.?", token["obj"]):
        return "r"
    return ""


def find_litera(token: pd.Series, pre_token: pd.Series) -> str:
    if find_room(token, pre_token):
        return ""
    # fmt: off
    if (
        any_of_in(LETTER, token["obj"].lower()) or
        re.search(r"\d{1,3}([А-Я]|[а-я])( |$)", token["obj"])
    ):
        #fmt: on
        return "l"
    if (
        (
            re.search(r"\b([А-Я]|[а-я]){1}$", token["obj"])
            and ("l" in pre_token["class"] or "h" in pre_token["class"])
        )
        and (" ш" not in token["obj"])
        and not find_countryside(token, pre_token)
    ):
        return "l"
    return ""


def find_edifice(token: pd.Series, pre_token: pd.Series) -> str:
    if any_of_in(EDIFICE_PREFIXES, token["obj"].lower()):
        return "e"
    return ""


def find_building(token: pd.Series, pre_token: pd.Series) -> str:
    if re.search(r"\d", token["obj"]) and not find_room(token, pre_token):
        if (
            any_of_in(BUILDING_PREFIXES, token["obj"].lower())
            or "b" in pre_token["class"]
            and ("h" not in token["class"])
            and not find_edifice(token, pre_token)
            or re.search(r"к\.* ?\d", token["obj"])
        ):
            return "b"
    return ""


def find_house(token: pd.Series, pre_token: pd.Series) -> str:
    if re.search(r"\d{1,4}", token["obj"]) and not find_room(token, pre_token):
        if any_of_in(HOUSES_PREFIXES, token["obj"].lower()):
            return "h"
        if re.search(r"(д|д\.) ?\d{1,4} ?\/*\d* ?", token["obj"]):
            return "h"
        if (
            (
                "s" in pre_token["class"]
                or "h" in pre_token["class"]
                or "s" in token["class"]
            )
            and not any_of_in(("-я", "-й", "-Я"), token["obj"])
            and not find_building(token, pre_token)
            and not find_edifice(token, pre_token)
        ):
            return "h"
        if (
            find_building(token, pre_token)
            and not any_of_in(("-я", "-й", "-Я"), token["obj"])
            and True
        ):
            if len(re.findall(r"\d{1,4}", token["obj"])) > 1:
                return "h"
            if int(re.search(r"\d{1,4}", token["obj"]).group()) // 10 > 0:
                return "h"
    return ""


def find_street(token: pd.Series, pre_token: pd.Series) -> str:
    if any_of_in(STREET_PREFIXES, token["obj"].lower()):
        return "s"
    if (
        re.search(r"\b[А-Яа-я]{4,}\b", token["obj"])
        and not any(
            [el in token["obj"].lower() for pr in PREFIXES for el in pr if len(el) > 2]
        )
        and not (
            "d" in token["class"] or "t" in token["class"] or "c" in token["class"]
        )
    ):
        return "s"
    return ""


def find_territory(token: pd.Series, pre_token: pd.Series) -> str:
    if any_of_in(TERRITORY_PREFIXES, token["obj"].lower()):
        return "t"
    return ""


def find_countryside(token: pd.Series, pre_token: pd.Series) -> str:
    if (
        any_of_in(COUNTRYSIDE_PREFIXES, token["obj"].lower())
        and re.search(r"\b[гпдрпктc]{1,3}(\b|\. )", token["obj"])
        and not find_house(token, pre_token)
        and not any_of_in(STREET_PREFIXES, token["obj"].lower())
    ):
        return "c"
    return ""


def find_district(token: pd.Series, pre_token: pd.Series) -> str:
    if any_of_in(DISTRICTS_PREFIXES, token["obj"].lower()):
        return "d"
    return ""


def address_classification(token: pd.Series, pre_token: pd.Series) -> pd.Series:
    brackets = re.search(r"\(.+\)", token["obj"])
    if brackets:
        token["obj"] = re.sub(r"\(.+\)", "()", token["obj"])

    token["class"] += find_district(token, pre_token)
    token["class"] += find_countryside(token, pre_token)
    token["class"] += find_territory(token, pre_token)
    token["class"] += find_street(token, pre_token)
    token["class"] += find_house(token, pre_token)
    token["class"] += find_building(token, pre_token)
    token["class"] += find_edifice(token, pre_token)
    token["class"] += find_litera(token, pre_token)
    token["class"] += find_room(token, pre_token)

    if token["class"] == "":
        token["class"] = "w"

    if brackets:
        token["obj"] = re.sub(r"\(\)", brackets.group(), token["obj"])

    return token
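
The classifier works on two-field tokens (`obj`, the text, and `class`, the accumulated class string) and appends one letter per recognised category in `CLASSES` order. A small sketch of calling it directly through the `create_token` helper from `parser/address/utils.py`; the sample strings are invented:

```python
from parser.address import address_classification, create_token

# Classify a street token against an empty previous token, then a house token
# against the street token.
street = address_classification(create_token("ул. Садовая"), create_token())
house = address_classification(create_token("д. 15"), street)

print(street["class"], house["class"])  # expected roughly: "s" "h"
```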

parser/address/splitter.py (new file, 295 lines)
@@ -0,0 +1,295 @@
from __future__ import annotations

import re
from collections.abc import Sequence

import pandas as pd

from .classifier import CLASSES, address_classification
from .utils import any_of_in, create_token, is_valid_token, unfold_house_ranges


class AddressSplitter(Sequence):
    def __init__(self, address: str):
        self.input = address

        self.addresses = self.split()

    # Sequence abstract methods implementation

    def __getitem__(self, key: int):
        if key < len(self.addresses):
            return self.addresses[key]
        else:
            raise IndexError()

    def __len__(self):
        return len(self.addresses)

    # Address token class manipulations

    def next_class(self) -> str:
        return self.token["class"][0]

    def prev_class(self) -> str:
        return self.accumulator["class"][-1]

    def correct_order(self) -> bool:
        return (
            len(self.accumulator["class"]) > 0
            and CLASSES.index(self.prev_class()) < CLASSES.index(self.next_class())
            and self.accumulator["class"] != "w"
        )

    def next_class_is(self, comparing_class: str) -> bool:
        return len(self.token["class"]) > 0 and self.next_class() == comparing_class[0]

    def has_no_class(self, comparing_class: str) -> bool:
        return comparing_class[0] not in self.accumulator["class"]

    def pop_token_class(self):
        self.token["class"] = self.token["class"][1:]

    # Accumulator constrains

    def next_is_street_or_upper(self) -> bool:
        is_unknown_class = self.accumulator["class"] in ("", "w")

        return (
            CLASSES.index(self.next_class()) <= CLASSES.index("s") or is_unknown_class
        )

    def has_numbered_street(self) -> bool:
        return any_of_in(("-я", "-й", "-Я"), self.accumulator["address"])

    # Accumulator manipulation

    ## House

    def substitue_house(self) -> str:
        house_regex = re.compile(r"\d{1,4} ?[\/\-]?\d* ?")

        number = house_regex.findall(self.token["obj"])[0]

        if self.has_numbered_street():
            house_number_index = 1
        else:
            house_number_index = 0

        number_in_accumulator = house_regex.findall(self.accumulator["address"])

        if number_in_accumulator:
            return re.sub(
                number_in_accumulator[house_number_index],
                number,
                self.accumulator["address"],
            )
        else:
            return self.accumulator["address"]

    ## Building

    def append_building(self, number: int) -> pd.Series:
        self.accumulator["class"] += "b"
        self.accumulator["address"] += "к." + number

        return self.accumulator

    def substitue_building(self, number: int) -> str:
        return re.sub(r"\d$", number, self.accumulator["address"])

    def insert_building(self):
        number = re.findall(r"\d", self.token["obj"])[-1]

        if number and self.has_no_class("building"):
            self.accumulator = self.append_building(number)
        else:
            self.accumulator["address"] = self.substitue_building(number)

    ## Edifice

    def substitue_edifice(self, number: int) -> str:
        return re.sub(r"cтр\. ?\d", number, self.accumulator["address"].strip())

    def insert_edifice(self):
        number = re.findall("стр\.? ?\d", self.token["obj"])[-1]

        self.accumulator["address"] = self.substitue_edifice(number)

        if number and self.has_no_class("edifice"):
            self.accumulator["class"] += "e"

    ## Letter

    def without_letter(self) -> str:
        return re.sub(r"[А-Яа-я]$", "", self.accumulator["address"].strip())

    def substitue_letter(self, letter: str) -> str:
        address_without_letter = self.without_letter()

        return address_without_letter + letter

    def insert_letter(self):
        letter = re.findall(r"[А-Яа-я]", self.token["obj"])[-1]
        self.accumulator["address"] = self.substitue_letter(letter)

        if letter and self.has_no_class("litera"):
            self.accumulator["class"] += "l"

    def has_letter_in(self) -> bool:
        return re.search(r"\d{1,3}([А-Я]|[а-я])( |$)", self.accumulator["address"])

    ## Room

    def substitue_room(self, number: int) -> str:
        return re.sub(
            r"пом\. ?\d\-?\d*\w?", number, self.accumulator["address"].strip()
        )

    def insert_room(self):
        number = re.findall("пом\. ?\-?\d*\w?", self.token["obj"])[-1]
        self.accumulator["address"] = self.substitue_room(number)

        if number and self.has_no_class("room"):
            self.accumulator["class"] += "r"

    # Data preprocessing

    def split_tokens(self) -> list[pd.Series]:
        address = self.input.replace(";", ",")

        parts = address.split(",")
        parts = map(str.strip, parts)
        parts = filter(is_valid_token, parts)

        tokens = map(lambda part: create_token(part, ""), parts)

        return list(tokens)

    def cut_address(self) -> pd.Series:
        # fmt: off
        while (
            len(self.accumulator["class"]) > 0
            and CLASSES.index(self.prev_class()) > CLASSES.index(self.next_class())
        ):
            # fmt: on
            match self.accumulator["class"][-1]:
                case "h":
                    self.accumulator["addresses"] = re.sub(
                        r"[мкдтпучасток]*\.? ?\d{1,4} ?\/*\d* ?",
                        "",
                        self.accumulator["address"].lower(),
                    )
                case "b":
                    number = re.findall(r"к{0,1}\.? ?\d", self.accumulator["address"])[
                        -1
                    ]
                    self.accumulator["address"] = re.sub(
                        number, "", self.accumulator["address"]
                    )
                case "e":
                    self.accumulator["address"] = re.sub(
                        r"cтр\.? ?\d", "", self.accumulator["address"]
                    )
                case "l":
                    self.accumulator["address"] = re.sub(
                        r"[литера]*\.? ?[А-Яа-я]{1}$", "", self.accumulator["address"]
                    )
                case "r":
                    self.accumulator["address"] = re.sub(
                        r"пом\.? ?\d+", "", self.accumulator["address"]
                    )

            self.accumulator["class"] = self.accumulator["class"][:-1]

        return self.accumulator

    # Splitting

    def split(self):
        self.tokens = self.split_tokens()

        result = []

        self.accumulator = pd.Series({"address": "", "class": ""})

        prev_token = create_token()

        for cursor in self.tokens:
            self.token = address_classification(cursor, prev_token)
            prev_token = self.token.copy()

            if self.accumulator["class"] == "":
                self.accumulator = self.token.rename({"obj": "address"})
                continue

            if self.correct_order():
                self.accumulator["address"] += " "
                self.accumulator += self.token.rename({"obj": "address"})
            else:
                unfolded_address = unfold_house_ranges(self.accumulator["address"])
                self.accumulator["address"] = unfolded_address[-1]

                result.extend(unfolded_address)

                self.accumulator = self.cut_address()

                if self.next_is_street_or_upper():
                    self.accumulator = self.token.rename({"obj": "address"})

                if self.next_class_is("house"):
                    self.accumulator["address"] = self.substitue_house()
                    self.pop_token_class()

                if self.next_class_is("building"):
                    self.insert_building()
                    self.pop_token_class()

                if self.next_class_is("edifice"):
                    self.insert_edifice()
                    self.pop_token_class()

                if self.next_class_is("letter"):
                    self.insert_letter()
                elif self.has_letter_in():
                    self.accumulator["address"] = self.without_letter()

                if self.next_class_is("room"):
                    self.insert_room()
                    self.pop_token_class()

        result.extend(unfold_house_ranges(self.accumulator["address"]))

        return result


def split_pesoch_res(address: str) -> list[str]:
    t = re.sub(r",", " ", address)
    t = re.split(r"(Санкт-Петербург|Ленинградская обл|Л\.О)", t)
    t = list(map(str.strip, filter(lambda token: token != "", t)))
    tokens = [t[i] + " " + t[i + 1] for i in range(0, len(t) - 1, 2)]

    if tokens:
        return list(set(tokens))
    return [address]


def process_row(row: pd.Series[str]) -> pd.Series[str]:
    row = row.copy()

    if pd.isnull(row["Улица"]):
        row["Улица"] = [None]
    else:
        if row["РЭС"] == "Песочинский РЭС":
            addresses = split_pesoch_res(row["Улица"])
        else:
            addresses = AddressSplitter(row["Улица"])
        row["Улица"] = addresses

    return row


def split_addresses(df: pd.DataFrame) -> pd.DataFrame:
    merged_df = df.apply(process_row, axis=1).reset_index()

    return merged_df.explode("Улица", ignore_index=True)
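
`AddressSplitter` implements `Sequence`, so splitting a raw address string amounts to constructing the object and iterating over it. A small sketch with an invented address:

```python
from parser.address import AddressSplitter

# Invented example string; split quality depends on the real outage data.
addresses = AddressSplitter("ул. Садовая, д. 2-4")

print(len(addresses))   # Sequence protocol: number of expanded addresses
print(list(addresses))  # roughly ["ул. Садовая д. 2", "ул. Садовая д. 3", "ул. Садовая д. 4"]
```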

parser/address/utils.py (new file, 47 lines)
@@ -0,0 +1,47 @@
import re
from collections.abc import Iterable
from typing import TypeVar

import pandas as pd

T = TypeVar("T")


def any_of_in(substrings: Iterable[str], string: str) -> bool:
    return any(map(lambda substring: substring in string, substrings))


def flatten(arr: Iterable[list[T]]) -> list[T]:
    return sum(arr, [])


def unfold_house_ranges(token: str) -> list[str]:
    addresses = []
    pairs_strings = re.findall(r"([\d]+-[\d]+)", token)
    for pair_string in pairs_strings:
        a, b = pair_string.split("-")
        a, b = int(a), int(b)

        if b > a:
            addresses += [
                re.sub(r"([\d]+-[\d]+)", number, token)
                for number in map(str, range(a, b + 1))
            ]
        else:
            token = token.replace("-", "/")
    if not addresses:
        addresses.append(token)
    return addresses


def is_valid_token(string: str) -> bool:
    return string not in ("", "уг.", "д.")


def create_token(obj: str = "", token_class: str = ""):
    return pd.Series(
        {
            "obj": obj,
            "class": token_class,
        }
    )
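
Of these helpers, `unfold_house_ranges` carries the most logic: it expands an `N-M` house range into one address per house, and when the "range" runs backwards it treats the dash as a house/corpus separator instead. A quick sketch (the street name is invented):

```python
from parser.address.utils import unfold_house_ranges

print(unfold_house_ranges("ул. Мира д. 2-4"))
# ["ул. Мира д. 2", "ул. Мира д. 3", "ул. Мира д. 4"]

print(unfold_house_ranges("ул. Мира д. 4-2"))
# ["ул. Мира д. 4/2"]  (b <= a, so the dash becomes a slash)
```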

@@ -45,7 +45,10 @@ async def async_fetch_building_id(
    async with session.get(
        "https://geocode.gate.petersburg.ru/parse/eas", params={"street": street}
    ) as r:
        res = await r.json()
        try:
            res = await r.json()
        except aiohttp.client_exceptions.ContentTypeError:
            res = "error"

    if "error" in res:
        return None, None, None
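
The guarded `r.json()` call above means a non-JSON reply from the geocoder is handled the same way as an explicit error payload: the coroutine falls through to returning `(None, None, None)`. A minimal calling sketch, assuming the signature documented earlier in this compare (`async_fetch_building_id(session, street) -> GeoTupleType`); the street string is invented:

```python
import asyncio

import aiohttp

from parser.building_id import async_fetch_building_id


async def main() -> None:
    async with aiohttp.ClientSession() as session:
        # Invented street; the geocoder expects a Saint Petersburg address.
        building_id, lat, lng = await async_fetch_building_id(session, "ул. Садовая д. 2")
        print(building_id, lat, lng)


asyncio.run(main())
```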

@@ -1,16 +1,14 @@
from typing import Optional

from . import (
    LenenergoParser,
    concurrent_fetch_builing_ids,
    preprocess_df,
    split_addresses,
)
from .address import split_addresses
from .building_id import concurrent_fetch_builing_ids
from .lenenergo import LenenergoParser
from .preprocess import preprocess_df


def pipeline(parser: Optional[LenenergoParser] = None) -> LenenergoParser:
    if parser is None:
        parser = LenenergoParser(file_path = r"C:\Users\Юля\PycharmProjects\machine_learning\lenengro_parser\data_Rosseti.csv")
        parser = LenenergoParser()

    print(parser)

@@ -21,6 +21,23 @@ COL_NS = {
    "lng": "Долгота",
}

PR_COL_NS = (
    "index",
    "region",
    "area",
    "town",
    "street",
    "branch",
    "res",
    "comment",
    "building_id",
    "lat",
    "lng",
    "start",
    "finish",
)


ICOL_NS = dict(map(reversed, COL_NS.items()))
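
The new `PR_COL_NS` tuple pins down the column order used downstream (it is also the default key set of `DB_COLUMNS_MAP` in `runner/config.py`), while the existing `ICOL_NS` line simply inverts `COL_NS`. The inversion trick in isolation, with an excerpt of the mapping:

```python
COL_NS = {"lng": "Долгота"}  # excerpt; the real dict maps every parser column
ICOL_NS = dict(map(reversed, COL_NS.items()))  # swaps keys and values

print(ICOL_NS)  # {"Долгота": "lng"}
```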

@@ -1,11 +1,17 @@
import schedule

from .config import REFETCH_PERIOD_H
from .config import REFETCH_PERIOD_H, STORE_NULL_BID
from .job import job
from .scheduler import run_continuously

schedule.every(REFETCH_PERIOD_H).hours.do(job)
stop_run_continuously = run_continuously()

print(
    f"Scheduled to run every {REFETCH_PERIOD_H} hour and "
    + ("" if STORE_NULL_BID else "not ")
    + "to store NULL building_id"
)

# First run
job()
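
`run_continuously` is imported from `.scheduler`, which this compare does not show; the `schedule` library's documentation describes it as a background-thread helper that keeps calling `run_pending()` and returns an event used to stop the loop. A sketch of that recipe, offered only as an assumption about what `runner/scheduler.py` contains:

```python
# Hypothetical reconstruction of runner/scheduler.py, following the standard
# "run continuously" recipe from the schedule library's documentation.
import threading
import time

import schedule


def run_continuously(interval: int = 1) -> threading.Event:
    stop_event = threading.Event()

    class ScheduleThread(threading.Thread):
        def run(self) -> None:
            # Keep firing due jobs until the caller sets the stop event.
            while not stop_event.is_set():
                schedule.run_pending()
                time.sleep(interval)

    ScheduleThread(daemon=True).start()
    return stop_event  # call .set() to stop the background loop
```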

@@ -1,6 +1,8 @@
import os
from parser import PR_COL_NS

REFETCH_PERIOD_H = int(os.environ.get("REFETCH_PERIOD_H", "4"))
STORE_NULL_BID = os.environ.get("STORE_NULL_BID", "False") == "True"

POSTGRES_USER = os.environ.get("POSTGRES_USER", "lenenergo")
POSTGRES_PASSWORD = os.environ.get("POSTGRES_PASSWORD", "lenenergo")
@@ -9,3 +11,9 @@ POSTGRES_HOST = os.environ.get("POSTGRES_HOST", "localhost")
POSTGRES_PORT = int(os.environ.get("POSTGRES_PORT", "5432"))

DB_URL = os.environ.get("DB_URL", None)

DB_COLUMNS_MAP = dict(zip(PR_COL_NS, PR_COL_NS))
"""
Feel free to rewrite mapping like
DB_COLUMNS_MAP["<COL_NS key>"] = "<corresponding db column name>"
"""
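
`DB_COLUMNS_MAP` starts out as an identity mapping over `PR_COL_NS`, so by default every parser column is copied into a database column of the same name; the docstring invites overriding individual entries, as the README shows. The default in isolation, on an excerpt of the tuple:

```python
PR_COL_NS = ("index", "region", "area")  # excerpt of the full tuple shown above
DB_COLUMNS_MAP = dict(zip(PR_COL_NS, PR_COL_NS))

print(DB_COLUMNS_MAP)  # {"index": "index", "region": "region", "area": "area"}
```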

@@ -1,10 +1,10 @@
from .config import (
    DB_URL,
    POSTGRES_DB,
    POSTGRES_HOST,
    POSTGRES_PASSWORD,
    POSTGRES_PORT,
    POSTGRES_USER,
    DB_URL,
)

db_credentials = {"conninfo": DB_URL}
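
`db_credentials` wraps a libpq conninfo string in a keyword dict, which is how psycopg 3 connections are opened in `runner/job.py`. A hedged sketch of typical consumption; the surrounding connection code is not part of this compare:

```python
import psycopg

from runner.database import db_credentials

# psycopg 3 accepts a conninfo string such as
# "postgresql://lenenergo:lenenergo@localhost:5432/lenenergo".
with psycopg.connect(**db_credentials) as connection:
    with connection.cursor() as cursor:
        cursor.execute("SELECT 1")
```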

@@ -4,23 +4,12 @@ from parser import pipeline
import pandas as pd
import psycopg

from .config import DB_COLUMNS_MAP, STORE_NULL_BID
from .database import db_credentials

sql_statement = """COPY records (
    index,
    region,
    area,
    town,
    street,
    branch,
    res,
    comment,
    building_id,
    lat,
    lng,
    start,
    finish
) FROM STDIN"""
sql_statement = "".join(
    ("COPY records (", ", ".join(DB_COLUMNS_MAP.values()), ") FROM STDIN")
)


def job():
@@ -36,6 +25,8 @@ def job():
        with cursor.copy(sql_statement) as copy:
            for _, row in parser.df.iterrows():
                row = row.where((pd.notnull(row)), None)
                copy.write_row(row.to_list())
                if row["building_id"] is not None or STORE_NULL_BID:
                    db_row = row.rename(DB_COLUMNS_MAP)
                    copy.write_row(db_row.to_list())

    print(f"Fetched in {datetime.now() - fetch_start}\n{parser}")
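
The hard-coded column list in the `COPY` statement is replaced by one generated from `DB_COLUMNS_MAP`, and rows are now filtered on `building_id` (unless `STORE_NULL_BID` is set) and renamed before being written. What the generated statement looks like with the default identity mapping, on an excerpt:

```python
DB_COLUMNS_MAP = {"index": "index", "region": "region", "area": "area"}  # excerpt

sql_statement = "".join(
    ("COPY records (", ", ".join(DB_COLUMNS_MAP.values()), ") FROM STDIN")
)

print(sql_statement)  # COPY records (index, region, area) FROM STDIN
```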