async-revised

Untone 2024-11-02 00:26:57 +03:00
parent 54c59d26b9
commit 0c009495a3
6 changed files with 91 additions and 55 deletions

cache/revalidator.py

@@ -14,7 +14,7 @@ class CacheRevalidationManager:
     async def start(self):
         """Start the background worker for cache revalidation."""
-        asyncio.create_task(self.revalidate_cache())
+        self.task = asyncio.create_task(self.revalidate_cache())

     async def revalidate_cache(self):
         """Cyclically check and revalidate the cache every self.interval seconds."""

@@ -48,9 +48,15 @@ class CacheRevalidationManager:
         """Mark an entity for revalidation."""
         self.items_to_revalidate[entity_type].add(entity_id)

-    def stop(self):
+    async def stop(self):
         """Stop the background worker."""
         self.running = False
+        if hasattr(self, 'task'):
+            self.task.cancel()
+            try:
+                await self.task
+            except asyncio.CancelledError:
+                pass

 revalidation_manager = CacheRevalidationManager(interval=300)  # Revalidate every 5 minutes
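
Two details make this more than a rename. Storing the handle returned by asyncio.create_task keeps the worker alive (the event loop holds only weak references to tasks, so an anonymous task can be garbage-collected mid-run), and the now-async stop() gives shutdown a deterministic end: cancel, then await until the coroutine has actually unwound. A minimal self-contained sketch of the same pattern, with illustrative names rather than the repo's:

import asyncio


class Worker:
    """Background-worker skeleton using the cancel-and-await shutdown pattern."""

    def __init__(self, interval: float = 1.0):
        self.interval = interval
        self.task = None

    async def start(self):
        # Keep the reference: the loop holds tasks only weakly,
        # so an anonymous task can be collected while still running.
        self.task = asyncio.create_task(self._loop())

    async def _loop(self):
        while True:
            await asyncio.sleep(self.interval)  # placeholder for real work

    async def stop(self):
        if self.task:
            self.task.cancel()   # request cancellation
            try:
                await self.task  # wait for the coroutine to finish unwinding
            except asyncio.CancelledError:
                pass             # expected during a clean shutdown


async def main():
    worker = Worker(interval=0.1)
    await worker.start()
    await asyncio.sleep(0.3)
    await worker.stop()


asyncio.run(main())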

main.py

@@ -74,7 +74,7 @@ async def create_all_tables_async():
 async def lifespan(app):
-    # Start all services at application startup
-    await asyncio.gather(
-        create_all_tables_async(),
-        redis.connect(),
+    try:
+        await asyncio.gather(
+            create_all_tables_async(),
+            redis.connect(),

@@ -85,8 +85,13 @@ async def lifespan(app):
-        revalidation_manager.start(),
-    )
-    yield
-    # Stop services at application shutdown
-    await redis.disconnect()
+            revalidation_manager.start(),
+        )
+        yield
+    finally:
+        tasks = [
+            redis.disconnect(),
+            ViewedStorage.stop(),
+            revalidation_manager.stop()
+        ]
+        await asyncio.gather(*tasks, return_exceptions=True)

 # Create the GraphQL instance
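
The try/finally shape guarantees teardown runs whether startup, the served application, or shutdown itself raises, and gather(*tasks, return_exceptions=True) keeps one failing teardown coroutine from cancelling or masking the rest. A runnable sketch of the pattern with stand-in services (connect_db and friends are illustrative, not from the repo):

import asyncio
from contextlib import asynccontextmanager


async def connect_db():        # illustrative stand-ins for real services
    print("db up")

async def connect_cache():
    print("cache up")

async def disconnect_db():
    print("db down")

async def disconnect_cache():
    print("cache down")


@asynccontextmanager
async def lifespan(app):
    try:
        # Start everything concurrently; if any startup step raises,
        # control still reaches the finally block below.
        await asyncio.gather(connect_db(), connect_cache())
        yield
    finally:
        # return_exceptions=True: one failed teardown coroutine
        # won't cancel or mask the others.
        results = await asyncio.gather(
            disconnect_db(), disconnect_cache(), return_exceptions=True
        )
        for result in results:
            if isinstance(result, Exception):
                print(f"shutdown error: {result!r}")


async def main():
    async with lifespan(None):
        print("serving")


asyncio.run(main())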


@@ -1,5 +1,3 @@
-import subprocess
-
 from granian.constants import Interfaces
 from granian.log import LogLevels
 from granian.server import Granian

@@ -8,23 +6,24 @@ from settings import PORT
 from utils.logger import root_logger as logger

-def is_docker_container_running(name):
-    cmd = ["docker", "ps", "-f", f"name={name}"]
-    output = subprocess.run(cmd, capture_output=True, text=True).stdout
-    logger.info(output)
-    return name in output
-
 if __name__ == "__main__":
     logger.info("started")
-    granian_instance = Granian(
-        "main:app",
-        address="0.0.0.0",  # noqa S104
-        port=PORT,
-        interface=Interfaces.ASGI,
-        threads=4,
-        websockets=False,
-        log_level=LogLevels.debug,
-    )
-    granian_instance.serve()
+    try:
+        granian_instance = Granian(
+            "main:app",
+            address="0.0.0.0",
+            port=PORT,
+            interface=Interfaces.ASGI,
+            threads=4,
+            websockets=False,
+            log_level=LogLevels.debug,
+            backlog=2048,
+        )
+        granian_instance.serve()
+    except Exception as error:
+        logger.error(f"Granian error: {error}", exc_info=True)
+        raise
+    finally:
+        logger.info("stopped")
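
A side note on the two config changes: the unused Docker-probe helper left along with its subprocess import, and backlog=2048 is the value handed to the kernel via listen(2), the queue of fully established TCP connections waiting to be accepted, so raising it helps absorb connection bursts rather than refusing them. The same knob on a bare socket:

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("0.0.0.0", 8000))
sock.listen(2048)  # the same queue Granian's backlog=2048 sizes
sock.close()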


@@ -22,7 +22,11 @@ if DB_URL.startswith("postgres"):
         max_overflow=20,
         pool_timeout=30,  # Wait time for a free connection
         pool_recycle=1800,  # Maximum connection lifetime
-        connect_args={"sslmode": "disable"},
+        pool_pre_ping=True,  # Add connection health checks
+        connect_args={
+            "sslmode": "disable",
+            "connect_timeout": 40  # Add a connection timeout
+        }
     )
 else:
     engine = create_engine(DB_URL, echo=False, connect_args={"check_same_thread": False})
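
pool_pre_ping issues a lightweight ping (typically SELECT 1) each time a connection is checked out of the pool and transparently replaces connections the server has dropped, for example after a Postgres restart, which complements pool_recycle's age-based rotation; connect_timeout is passed through to libpq and bounds the initial connection handshake in seconds. A minimal sketch with the same settings (the URL and pool_size are illustrative):

from sqlalchemy import create_engine, text

# Illustrative URL; the repo reads DB_URL from settings.
engine = create_engine(
    "postgresql://user:pass@localhost:5432/app",
    echo=False,
    pool_size=10,
    max_overflow=20,
    pool_timeout=30,     # seconds to wait for a free pooled connection
    pool_recycle=1800,   # retire connections older than 30 minutes
    pool_pre_ping=True,  # ping on checkout; silently replace dead connections
    connect_args={"sslmode": "disable", "connect_timeout": 40},
)

# Requires a reachable Postgres at the URL above.
with engine.connect() as conn:
    print(conn.execute(text("SELECT 1")).scalar())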


@@ -166,7 +166,19 @@ class SearchService:
     async def perform_index(self, shout, index_body):
         if self.client:
-            self.client.index(index=self.index_name, id=str(shout.id), body=index_body)
+            try:
+                await asyncio.wait_for(
+                    self.client.index(
+                        index=self.index_name,
+                        id=str(shout.id),
+                        body=index_body
+                    ),
+                    timeout=40.0
+                )
+            except asyncio.TimeoutError:
+                logger.error(f"Indexing timeout for shout {shout.id}")
+            except Exception as e:
+                logger.error(f"Indexing error for shout {shout.id}: {e}")

     async def search(self, text, limit, offset):
         logger.info(f"Searching: {text} {offset}+{limit}")
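
Note that asyncio.wait_for takes an awaitable, so this version assumes self.client is an async client whose index() returns a coroutine; the old code called it synchronously. On timeout, wait_for cancels the pending call and raises asyncio.TimeoutError, which is logged here instead of propagating to whoever triggered the indexing. The pattern in isolation, with slow_index standing in for the real client call:

import asyncio
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("search")


async def slow_index(doc_id: str):
    # Stand-in for an async search-client call that may hang.
    await asyncio.sleep(5)


async def perform_index(doc_id: str):
    try:
        # Cancel the pending operation if it exceeds the time budget.
        await asyncio.wait_for(slow_index(doc_id), timeout=1.0)
    except asyncio.TimeoutError:
        logger.error(f"Indexing timeout for doc {doc_id}")
    except Exception as e:
        logger.error(f"Indexing error for doc {doc_id}: {e}")


asyncio.run(perform_index("42"))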


@@ -37,6 +37,12 @@ class ViewedStorage:
     auth_result = None
     disabled = False
     start_date = datetime.now().strftime("%Y-%m-%d")
+    running = True
+
+    @staticmethod
+    async def stop():
+        self = ViewedStorage
+        self.running = False

     @staticmethod
     async def init():

@@ -196,17 +202,21 @@ class ViewedStorage:
         if self.disabled:
             return

-        while True:
-            try:
-                await self.update_pages()
-                failed = 0
-            except Exception as exc:
-                failed += 1
-                logger.debug(exc)
-                logger.info(" - update failed #%d, wait 10 secs" % failed)
-                if failed > 3:
-                    logger.info(" - views update failed, not trying anymore")
-                    break
-            if failed == 0:
-                when = datetime.now(timezone.utc) + timedelta(seconds=self.period)
-                t = format(when.astimezone().isoformat())
+        try:
+            while self.running:
+                try:
+                    await self.update_pages()
+                    failed = 0
+                except Exception as exc:
+                    failed += 1
+                    logger.debug(exc)
+                    logger.warning(" - update failed #%d, wait 10 secs" % failed)
+                    if failed > 3 or isinstance(exc, asyncio.CancelledError):
+                        logger.error("ViewedStorage worker cancelled")
+                        break
+                if failed == 0:
+                    when = datetime.now(timezone.utc) + timedelta(seconds=self.period)
+                    t = format(when.astimezone().isoformat())
+        finally:
+            self.running = False
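
One caveat in the worker rewrite: since Python 3.8, asyncio.CancelledError subclasses BaseException, so the except Exception handler never catches it and the isinstance check in that branch cannot fire; cancellation instead unwinds straight through the loop, where the new try/finally still resets running. A sketch of the flag-plus-finally shutdown with cancellation handled explicitly (all names illustrative):

import asyncio


class PeriodicWorker:
    running = True
    period = 0.2

    @classmethod
    async def run(cls):
        failed = 0
        try:
            while cls.running:
                try:
                    await cls.update()     # stand-in for update_pages()
                    failed = 0
                except asyncio.CancelledError:
                    raise                  # let cancellation unwind via finally
                except Exception:
                    failed += 1
                    if failed > 3:
                        break              # give up after repeated failures
                await asyncio.sleep(cls.period if failed == 0 else 0.05)
        finally:
            cls.running = False            # always mark the worker stopped

    @classmethod
    async def update(cls):
        print("tick")


async def main():
    task = asyncio.create_task(PeriodicWorker.run())
    await asyncio.sleep(0.5)
    PeriodicWorker.running = False         # cooperative stop via the flag
    await task


asyncio.run(main())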