Commit 27b0928e73: feat: title weight procedure
Author: Stepan Vladovskiy
Date: 2025-04-15 19:32:34 -03:00
CI: Deploy on push / deploy (push) succeeded in 1m16s
26 changed files with 678 additions and 1445 deletions


@@ -1,14 +1,3 @@
-#### [0.4.20] - 2025-05-03
-- Fixed a bug in the `CacheRevalidationManager` class: added initialization of the `_redis` attribute
-- Improved Redis connection handling in the cache revalidation manager:
-  - Automatic reconnection when the connection is lost
-  - Connection check before performing cache operations
-  - Additional logging to simplify diagnostics
-- Fixed the `unpublish_shout` resolver:
-  - Correct construction of the synthetic `publication` field with `published_at: null`
-  - Returns a complete data dictionary instead of a model object
-  - Improved loading of related data (authors, topics) so the response is formed correctly
 #### [0.4.19] - 2025-04-14
 - dropped `Shout.description` and `Draft.description` to be UX-generated
 - use redis to init views counters after migrator


@@ -3,7 +3,6 @@ FROM python:slim
 RUN apt-get update && apt-get install -y \
     postgresql-client \
     curl \
-    build-essential \
     && rm -rf /var/lib/apt/lists/*

 WORKDIR /app


@@ -1 +0,0 @@

cache/precache.py

@@ -77,15 +77,11 @@ async def precache_topics_followers(topic_id: int, session):
async def precache_data(): async def precache_data():
logger.info("precaching...") logger.info("precaching...")
logger.debug("Entering precache_data")
try: try:
key = "authorizer_env" key = "authorizer_env"
logger.debug(f"Fetching existing hash for key '{key}' from Redis")
# cache reset # cache reset
value = await redis.execute("HGETALL", key) value = await redis.execute("HGETALL", key)
logger.debug(f"Fetched value for '{key}': {value}")
await redis.execute("FLUSHDB") await redis.execute("FLUSHDB")
logger.debug("Redis database flushed")
logger.info("redis: FLUSHDB") logger.info("redis: FLUSHDB")
# Преобразуем словарь в список аргументов для HSET # Преобразуем словарь в список аргументов для HSET
@@ -101,27 +97,21 @@ async def precache_data():
await redis.execute("HSET", key, *value) await redis.execute("HSET", key, *value)
logger.info(f"redis hash '{key}' was restored") logger.info(f"redis hash '{key}' was restored")
logger.info("Beginning topic precache phase")
with local_session() as session: with local_session() as session:
# topics # topics
q = select(Topic).where(Topic.community == 1) q = select(Topic).where(Topic.community == 1)
topics = get_with_stat(q) topics = get_with_stat(q)
logger.info(f"Found {len(topics)} topics to precache")
for topic in topics: for topic in topics:
topic_dict = topic.dict() if hasattr(topic, "dict") else topic topic_dict = topic.dict() if hasattr(topic, "dict") else topic
logger.debug(f"Precaching topic id={topic_dict.get('id')}")
await cache_topic(topic_dict) await cache_topic(topic_dict)
logger.debug(f"Cached topic id={topic_dict.get('id')}")
await asyncio.gather( await asyncio.gather(
precache_topics_followers(topic_dict["id"], session), precache_topics_followers(topic_dict["id"], session),
precache_topics_authors(topic_dict["id"], session), precache_topics_authors(topic_dict["id"], session),
) )
logger.debug(f"Finished precaching followers and authors for topic id={topic_dict.get('id')}")
logger.info(f"{len(topics)} topics and their followings precached") logger.info(f"{len(topics)} topics and their followings precached")
# authors # authors
authors = get_with_stat(select(Author).where(Author.user.is_not(None))) authors = get_with_stat(select(Author).where(Author.user.is_not(None)))
logger.info(f"Found {len(authors)} authors to precache")
logger.info(f"{len(authors)} authors found in database") logger.info(f"{len(authors)} authors found in database")
for author in authors: for author in authors:
if isinstance(author, Author): if isinstance(author, Author):
@@ -129,12 +119,10 @@ async def precache_data():
author_id = profile.get("id") author_id = profile.get("id")
user_id = profile.get("user", "").strip() user_id = profile.get("user", "").strip()
if author_id and user_id: if author_id and user_id:
logger.debug(f"Precaching author id={author_id}")
await cache_author(profile) await cache_author(profile)
await asyncio.gather( await asyncio.gather(
precache_authors_followers(author_id, session), precache_authors_follows(author_id, session) precache_authors_followers(author_id, session), precache_authors_follows(author_id, session)
) )
logger.debug(f"Finished precaching followers and follows for author id={author_id}")
else: else:
logger.error(f"fail caching {author}") logger.error(f"fail caching {author}")
logger.info(f"{len(authors)} authors and their followings precached") logger.info(f"{len(authors)} authors and their followings precached")

cache/revalidator.py

@@ -8,7 +8,6 @@ from cache.cache import (
     invalidate_cache_by_prefix,
 )
 from resolvers.stat import get_with_stat
-from services.redis import redis
 from utils.logger import root_logger as logger

 CACHE_REVALIDATION_INTERVAL = 300  # 5 minutes
@@ -22,19 +21,9 @@ class CacheRevalidationManager:
         self.lock = asyncio.Lock()
         self.running = True
         self.MAX_BATCH_SIZE = 10  # Максимальное количество элементов для поштучной обработки
-        self._redis = redis  # Добавлена инициализация _redis для доступа к Redis-клиенту

     async def start(self):
         """Запуск фонового воркера для ревалидации кэша."""
-        # Проверяем, что у нас есть соединение с Redis
-        if not self._redis._client:
-            logger.warning("Redis connection not established. Waiting for connection...")
-            try:
-                await self._redis.connect()
-                logger.info("Redis connection established for revalidation manager")
-            except Exception as e:
-                logger.error(f"Failed to connect to Redis: {e}")
         self.task = asyncio.create_task(self.revalidate_cache())

     async def revalidate_cache(self):
@@ -50,10 +39,6 @@
     async def process_revalidation(self):
         """Обновление кэша для всех сущностей, требующих ревалидации."""
-        # Проверяем соединение с Redis
-        if not self._redis._client:
-            return  # Выходим из метода, если не удалось подключиться
         async with self.lock:
             # Ревалидация кэша авторов
             if self.items_to_revalidate["authors"]:
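For reference, the guard being removed above follows a common check-then-reconnect pattern around a lazily created Redis client. The sketch below is illustrative only: the wrapper class and the `_ensure_connection` helper are assumed names, not part of the codebase; only the `_redis._client` attribute and the async `connect()` call appear in the removed lines.

```python
import logging

logger = logging.getLogger(__name__)


class RevalidationWorker:
    """Sketch of the removed connection guard (class and helper names are assumed)."""

    def __init__(self, redis_service):
        self._redis = redis_service  # direct reference to the Redis service wrapper

    async def _ensure_connection(self) -> bool:
        # Reconnect lazily when the underlying client is missing or was dropped.
        if getattr(self._redis, "_client", None) is None:
            logger.warning("Redis connection not established, reconnecting...")
            try:
                await self._redis.connect()
            except Exception as e:
                logger.error(f"Failed to connect to Redis: {e}")
                return False
        return True

    async def process_revalidation(self):
        # Skip the revalidation cycle instead of raising when Redis is unreachable.
        if not await self._ensure_connection():
            return
        # ... revalidate queued authors, topics, shouts and reactions here ...
```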


@@ -147,32 +147,16 @@ await invalidate_topics_cache(456)
 ```python
 class CacheRevalidationManager:
-    def __init__(self, interval=CACHE_REVALIDATION_INTERVAL):
     # ...
-        self._redis = redis  # Direct reference to the Redis service
-
-    async def start(self):
-        # Check and establish the Redis connection
-        # ...
-
     async def process_revalidation(self):
-        # Process the items queued for revalidation
         # ...

     def mark_for_revalidation(self, entity_id, entity_type):
-        # Adds an entity to the revalidation queue
         # ...
 ```

 The revalidation manager runs as an asynchronous background process that periodically (every 5 minutes by default) checks whether there are entities to revalidate.

-**Interaction with Redis:**
-- CacheRevalidationManager keeps a direct reference to the Redis service via the `_redis` attribute
-- On startup the Redis connection is checked and re-established if necessary
-- The connection is automatically verified before each revalidation operation
-- The system restores the connection on its own if it is lost
-
-**Implementation notes:**
+Implementation notes:
 - Authors and topics are revalidated one record at a time
 - Shouts and reactions are processed in batches, with a threshold of 10 items
 - Once the threshold is reached, the system switches to invalidating whole collections instead of per-item processing
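A minimal sketch of the batching rule described in these notes, assuming illustrative helper callables (`revalidate_one`, `invalidate_collection`) that are not taken from the codebase: below the threshold each queued ID is refreshed individually, at or above it the whole collection cache is invalidated instead.

```python
import asyncio

MAX_BATCH_SIZE = 10  # threshold described in the notes above


async def revalidate_queue(ids: set[int], revalidate_one, invalidate_collection) -> None:
    """Apply the per-item vs. batch rule to a queue of entity IDs (sketch, assumed helpers)."""
    if not ids:
        return
    if len(ids) < MAX_BATCH_SIZE:
        # Small queue: refresh each cached record individually.
        await asyncio.gather(*(revalidate_one(entity_id) for entity_id in ids))
    else:
        # Large queue: dropping the collection cache and rebuilding lazily is cheaper.
        await invalidate_collection()
    ids.clear()
```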

main.py

@@ -17,6 +17,7 @@ from cache.revalidator import revalidation_manager
from services.exception import ExceptionHandlerMiddleware from services.exception import ExceptionHandlerMiddleware
from services.redis import redis from services.redis import redis
from services.schema import create_all_tables, resolvers from services.schema import create_all_tables, resolvers
#from services.search import search_service
from services.search import search_service, initialize_search_index from services.search import search_service, initialize_search_index
from services.viewed import ViewedStorage from services.viewed import ViewedStorage
from services.webhook import WebhookEndpoint, create_webhook_endpoint from services.webhook import WebhookEndpoint, create_webhook_endpoint
@@ -42,15 +43,6 @@ async def check_search_service():
else: else:
print(f"[INFO] Search service is available: {info}") print(f"[INFO] Search service is available: {info}")
# Helper to run precache with timeout and catch errors
async def precache_with_timeout():
try:
await asyncio.wait_for(precache_data(), timeout=60)
except asyncio.TimeoutError:
print("[precache] Precache timed out after 60 seconds")
except Exception as e:
print(f"[precache] Error during precache: {e}")
# indexing DB data # indexing DB data
# async def indexing(): # async def indexing():
@@ -61,12 +53,9 @@ async def lifespan(_app):
try: try:
print("[lifespan] Starting application initialization") print("[lifespan] Starting application initialization")
create_all_tables() create_all_tables()
# schedule precaching in background with timeout and error handling
asyncio.create_task(precache_with_timeout())
await asyncio.gather( await asyncio.gather(
redis.connect(), redis.connect(),
precache_data(),
ViewedStorage.init(), ViewedStorage.init(),
create_webhook_endpoint(), create_webhook_endpoint(),
check_search_service(), check_search_service(),


@@ -6,7 +6,6 @@ from sqlalchemy.orm import relationship
from orm.author import Author from orm.author import Author
from orm.topic import Topic from orm.topic import Topic
from services.db import Base from services.db import Base
from orm.shout import Shout
class DraftTopic(Base): class DraftTopic(Base):
@@ -27,14 +26,12 @@ class DraftAuthor(Base):
caption = Column(String, nullable=True, default="") caption = Column(String, nullable=True, default="")
class Draft(Base): class Draft(Base):
__tablename__ = "draft" __tablename__ = "draft"
# required # required
created_at: int = Column(Integer, nullable=False, default=lambda: int(time.time())) created_at: int = Column(Integer, nullable=False, default=lambda: int(time.time()))
# Колонки для связей с автором created_by: int = Column(ForeignKey("author.id"), nullable=False)
created_by: int = Column("created_by", ForeignKey("author.id"), nullable=False) community: int = Column(ForeignKey("community.id"), nullable=False, default=1)
community: int = Column("community", ForeignKey("community.id"), nullable=False, default=1)
# optional # optional
layout: str = Column(String, nullable=True, default="article") layout: str = Column(String, nullable=True, default="article")
@@ -52,55 +49,7 @@ class Draft(Base):
# auto # auto
updated_at: int | None = Column(Integer, nullable=True, index=True) updated_at: int | None = Column(Integer, nullable=True, index=True)
deleted_at: int | None = Column(Integer, nullable=True, index=True) deleted_at: int | None = Column(Integer, nullable=True, index=True)
updated_by: int | None = Column("updated_by", ForeignKey("author.id"), nullable=True) updated_by: int | None = Column(ForeignKey("author.id"), nullable=True)
deleted_by: int | None = Column("deleted_by", ForeignKey("author.id"), nullable=True) deleted_by: int | None = Column(ForeignKey("author.id"), nullable=True)
authors = relationship(Author, secondary="draft_author")
# --- Relationships --- topics = relationship(Topic, secondary="draft_topic")
# Только many-to-many связи через вспомогательные таблицы
authors = relationship(Author, secondary="draft_author", lazy="select")
topics = relationship(Topic, secondary="draft_topic", lazy="select")
# Связь с Community (если нужна как объект, а не ID)
# community = relationship("Community", foreign_keys=[community_id], lazy="joined")
# Пока оставляем community_id как ID
# Связь с публикацией (один-к-одному или один-к-нулю)
# Загружается через joinedload в резолвере
publication = relationship(
"Shout",
primaryjoin="Draft.id == Shout.draft",
foreign_keys="Shout.draft",
uselist=False,
lazy="noload", # Не грузим по умолчанию, только через options
viewonly=True # Указываем, что это связь только для чтения
)
def dict(self):
"""
Сериализует объект Draft в словарь.
Гарантирует, что поля topics и authors всегда будут списками.
"""
return {
"id": self.id,
"created_at": self.created_at,
"created_by": self.created_by,
"community": self.community,
"layout": self.layout,
"slug": self.slug,
"title": self.title,
"subtitle": self.subtitle,
"lead": self.lead,
"body": self.body,
"media": self.media or [],
"cover": self.cover,
"cover_caption": self.cover_caption,
"lang": self.lang,
"seo": self.seo,
"updated_at": self.updated_at,
"deleted_at": self.deleted_at,
"updated_by": self.updated_by,
"deleted_by": self.deleted_by,
# Гарантируем, что topics и authors всегда будут списками
"topics": [topic.dict() for topic in (self.topics or [])],
"authors": [author.dict() for author in (self.authors or [])]
}


@@ -71,34 +71,6 @@ class ShoutAuthor(Base):
class Shout(Base): class Shout(Base):
""" """
Публикация в системе. Публикация в системе.
Attributes:
body (str)
slug (str)
cover (str) : "Cover image url"
cover_caption (str) : "Cover image alt caption"
lead (str)
title (str)
subtitle (str)
layout (str)
media (dict)
authors (list[Author])
topics (list[Topic])
reactions (list[Reaction])
lang (str)
version_of (int)
oid (str)
seo (str) : JSON
draft (int)
created_at (int)
updated_at (int)
published_at (int)
featured_at (int)
deleted_at (int)
created_by (int)
updated_by (int)
deleted_by (int)
community (int)
""" """
__tablename__ = "shout" __tablename__ = "shout"


@@ -8,7 +8,6 @@ from resolvers.author import ( # search_authors,
get_author_id, get_author_id,
get_authors_all, get_authors_all,
load_authors_by, load_authors_by,
load_authors_search,
update_author, update_author,
) )
from resolvers.community import get_communities_all, get_community from resolvers.community import get_communities_all, get_community
@@ -17,11 +16,9 @@ from resolvers.draft import (
delete_draft, delete_draft,
load_drafts, load_drafts,
publish_draft, publish_draft,
unpublish_draft,
update_draft, update_draft,
) )
from resolvers.editor import (
unpublish_shout,
)
from resolvers.feed import ( from resolvers.feed import (
load_shouts_coauthored, load_shouts_coauthored,
load_shouts_discussed, load_shouts_discussed,
@@ -74,7 +71,6 @@ __all__ = [
"get_author_follows_authors", "get_author_follows_authors",
"get_authors_all", "get_authors_all",
"load_authors_by", "load_authors_by",
"load_authors_search",
"update_author", "update_author",
## "search_authors", ## "search_authors",
# community # community


@@ -20,7 +20,6 @@ from services.auth import login_required
from services.db import local_session from services.db import local_session
from services.redis import redis from services.redis import redis
from services.schema import mutation, query from services.schema import mutation, query
from services.search import search_service
from utils.logger import root_logger as logger from utils.logger import root_logger as logger
DEFAULT_COMMUNITIES = [1] DEFAULT_COMMUNITIES = [1]
@@ -302,46 +301,6 @@ async def load_authors_by(_, _info, by, limit, offset):
return await get_authors_with_stats(limit, offset, by) return await get_authors_with_stats(limit, offset, by)
@query.field("load_authors_search")
async def load_authors_search(_, info, text: str, limit: int = 10, offset: int = 0):
"""
Resolver for searching authors by text. Works with txt-ai search endpoint.
Args:
text: Search text
limit: Maximum number of authors to return
offset: Offset for pagination
Returns:
list: List of authors matching the search criteria
"""
# Get author IDs from search engine (already sorted by relevance)
search_results = await search_service.search_authors(text, limit, offset)
if not search_results:
return []
author_ids = [result.get("id") for result in search_results if result.get("id")]
if not author_ids:
return []
# Fetch full author objects from DB
with local_session() as session:
# Simple query to get authors by IDs - no need for stats here
authors_query = select(Author).filter(Author.id.in_(author_ids))
db_authors = session.execute(authors_query).scalars().all()
if not db_authors:
return []
# Create a dictionary for quick lookup
authors_dict = {str(author.id): author for author in db_authors}
# Keep the order from search results (maintains the relevance sorting)
ordered_authors = [authors_dict[author_id] for author_id in author_ids if author_id in authors_dict]
return ordered_authors
def get_author_id_from(slug="", user=None, author_id=None): def get_author_id_from(slug="", user=None, author_id=None):
try: try:
author_id = None author_id = None


@@ -1,6 +1,8 @@
import time import time
from operator import or_
import trafilatura import trafilatura
from sqlalchemy.orm import joinedload from sqlalchemy.sql import and_
from cache.cache import ( from cache.cache import (
cache_author, cache_author,
@@ -10,7 +12,7 @@ from cache.cache import (
invalidate_shouts_cache, invalidate_shouts_cache,
) )
from orm.author import Author from orm.author import Author
from orm.draft import Draft, DraftAuthor, DraftTopic from orm.draft import Draft
from orm.shout import Shout, ShoutAuthor, ShoutTopic from orm.shout import Shout, ShoutAuthor, ShoutTopic
from orm.topic import Topic from orm.topic import Topic
from services.auth import login_required from services.auth import login_required
@@ -18,70 +20,34 @@ from services.db import local_session
from services.notify import notify_shout from services.notify import notify_shout
from services.schema import mutation, query from services.schema import mutation, query
from services.search import search_service from services.search import search_service
from utils.html_wrapper import wrap_html_fragment
from utils.logger import root_logger as logger from utils.logger import root_logger as logger
def create_shout_from_draft(session, draft, author_id): def create_shout_from_draft(session, draft, author_id):
"""
Создаёт новый объект публикации (Shout) на основе черновика.
Args:
session: SQLAlchemy сессия (не используется, для совместимости)
draft (Draft): Объект черновика
author_id (int): ID автора публикации
Returns:
Shout: Новый объект публикации (не сохранённый в базе)
Пример:
>>> from orm.draft import Draft
>>> draft = Draft(id=1, title='Заголовок', body='Текст', slug='slug', created_by=1)
>>> shout = create_shout_from_draft(None, draft, 1)
>>> shout.title
'Заголовок'
>>> shout.body
'Текст'
>>> shout.created_by
1
"""
# Создаем новую публикацию # Создаем новую публикацию
shout = Shout( shout = Shout(
body=draft.body or "", body=draft.body,
slug=draft.slug, slug=draft.slug,
cover=draft.cover, cover=draft.cover,
cover_caption=draft.cover_caption, cover_caption=draft.cover_caption,
lead=draft.lead, lead=draft.lead,
title=draft.title or "", title=draft.title,
subtitle=draft.subtitle, subtitle=draft.subtitle,
layout=draft.layout or "article", layout=draft.layout,
media=draft.media or [], media=draft.media,
lang=draft.lang or "ru", lang=draft.lang,
seo=draft.seo, seo=draft.seo,
created_by=author_id, created_by=author_id,
community=draft.community, community=draft.community,
draft=draft.id, draft=draft.id,
deleted_at=None, deleted_at=None,
) )
# Инициализируем пустые массивы для связей
shout.topics = []
shout.authors = []
return shout return shout
@query.field("load_drafts") @query.field("load_drafts")
@login_required @login_required
async def load_drafts(_, info): async def load_drafts(_, info):
"""
Загружает все черновики, доступные текущему пользователю.
Предварительно загружает связанные объекты (topics, authors, publication),
чтобы избежать ошибок с отсоединенными объектами при сериализации.
Returns:
dict: Список черновиков или сообщение об ошибке
"""
user_id = info.context.get("user_id") user_id = info.context.get("user_id")
author_dict = info.context.get("author", {}) author_dict = info.context.get("author", {})
author_id = author_dict.get("id") author_id = author_dict.get("id")
@@ -89,44 +55,13 @@ async def load_drafts(_, info):
if not user_id or not author_id: if not user_id or not author_id:
return {"error": "User ID and author ID are required"} return {"error": "User ID and author ID are required"}
try:
with local_session() as session: with local_session() as session:
# Предзагружаем authors, topics и связанную publication drafts = (
drafts_query = (
session.query(Draft) session.query(Draft)
.options( .filter(or_(Draft.authors.any(Author.id == author_id), Draft.created_by == author_id))
joinedload(Draft.topics), .all()
joinedload(Draft.authors),
joinedload(Draft.publication) # Загружаем связанную публикацию
) )
.filter(Draft.authors.any(Author.id == author_id)) return {"drafts": drafts}
)
drafts = drafts_query.all()
# Преобразуем объекты в словари, пока они в контексте сессии
drafts_data = []
for draft in drafts:
draft_dict = draft.dict()
# Всегда возвращаем массив для topics, даже если он пустой
draft_dict["topics"] = [topic.dict() for topic in (draft.topics or [])]
draft_dict["authors"] = [author.dict() for author in (draft.authors or [])]
# Добавляем информацию о публикации, если она есть
if draft.publication:
draft_dict["publication"] = {
"id": draft.publication.id,
"slug": draft.publication.slug,
"published_at": draft.publication.published_at
}
else:
draft_dict["publication"] = None
drafts_data.append(draft_dict)
return {"drafts": drafts_data}
except Exception as e:
logger.error(f"Failed to load drafts: {e}", exc_info=True)
return {"error": f"Failed to load drafts: {str(e)}"}
@mutation.field("create_draft") @mutation.field("create_draft")
@@ -181,29 +116,21 @@ async def create_draft(_, info, draft_input):
if "id" in draft_input: if "id" in draft_input:
del draft_input["id"] del draft_input["id"]
# Добавляем текущее время создания и ID автора if "seo" not in draft_input and not draft_input["seo"]:
body_teaser = draft_input.get("body", "")[:300].split("\n")[:-1].join("\n")
draft_input["seo"] = draft_input.get("lead", body_teaser)
# Добавляем текущее время создания
draft_input["created_at"] = int(time.time()) draft_input["created_at"] = int(time.time())
draft_input["created_by"] = author_id
draft = Draft(**draft_input) draft = Draft(created_by=author_id, **draft_input)
session.add(draft) session.add(draft)
session.flush()
# Добавляем создателя как автора
da = DraftAuthor(shout=draft.id, author=author_id)
session.add(da)
session.commit() session.commit()
return {"draft": draft} return {"draft": draft}
except Exception as e: except Exception as e:
logger.error(f"Failed to create draft: {e}", exc_info=True) logger.error(f"Failed to create draft: {e}", exc_info=True)
return {"error": f"Failed to create draft: {str(e)}"} return {"error": f"Failed to create draft: {str(e)}"}
def generate_teaser(body, limit=300):
body_html = wrap_html_fragment(body)
body_text = trafilatura.extract(body_html, include_comments=False, include_tables=False)
body_teaser = ". ".join(body_text[:limit].split(". ")[:-1])
return body_teaser
@mutation.field("update_draft") @mutation.field("update_draft")
@login_required @login_required
@@ -212,21 +139,7 @@ async def update_draft(_, info, draft_id: int, draft_input):
Args: Args:
draft_id: ID черновика для обновления draft_id: ID черновика для обновления
draft_input: Данные для обновления черновика согласно схеме DraftInput: draft_input: Данные для обновления черновика
- layout: String
- author_ids: [Int!]
- topic_ids: [Int!]
- main_topic_id: Int
- media: [MediaItemInput]
- lead: String
- subtitle: String
- lang: String
- seo: String
- body: String
- title: String
- slug: String
- cover: String
- cover_caption: String
Returns: Returns:
dict: Обновленный черновик или сообщение об ошибке dict: Обновленный черновик или сообщение об ошибке
@@ -238,89 +151,36 @@ async def update_draft(_, info, draft_id: int, draft_input):
if not user_id or not author_id: if not user_id or not author_id:
return {"error": "Author ID are required"} return {"error": "Author ID are required"}
try: # Проверяем slug - он должен быть или не пустым, или не передаваться вообще
if "slug" in draft_input and (draft_input["slug"] is None or draft_input["slug"] == ""):
# Если slug пустой, либо удаляем его из входных данных, либо генерируем временный уникальный
# Вариант 1: просто удаляем ключ из входных данных, чтобы оставить старое значение
del draft_input["slug"]
# Вариант 2 (если нужно обновить): генерируем временный уникальный slug
# import uuid
# draft_input["slug"] = f"draft-{uuid.uuid4().hex[:8]}"
with local_session() as session: with local_session() as session:
draft = session.query(Draft).filter(Draft.id == draft_id).first() draft = session.query(Draft).filter(Draft.id == draft_id).first()
if not draft: if not draft:
return {"error": "Draft not found"} return {"error": "Draft not found"}
# Фильтруем входные данные, оставляя только разрешенные поля if "seo" not in draft_input and not draft.seo:
allowed_fields = { body_src = draft_input["body"] if "body" in draft_input else draft.body
"layout", "author_ids", "topic_ids", "main_topic_id", body_text = trafilatura.extract(body_src)
"media", "lead", "subtitle", "lang", "seo", "body", lead_src = draft_input["lead"] if "lead" in draft_input else draft.lead
"title", "slug", "cover", "cover_caption" lead_text = trafilatura.extract(lead_src)
} body_teaser = body_text[:300].split(". ")[:-1].join(".\n")
filtered_input = {k: v for k, v in draft_input.items() if k in allowed_fields} draft_input["seo"] = lead_text or body_teaser
# Проверяем slug Draft.update(draft, draft_input)
if "slug" in filtered_input and not filtered_input["slug"]: # Set updated_at and updated_by from the authenticated user
del filtered_input["slug"] current_time = int(time.time())
draft.updated_at = current_time
# Обновляем связи с авторами если переданы
if "author_ids" in filtered_input:
author_ids = filtered_input.pop("author_ids")
if author_ids:
# Очищаем текущие связи
session.query(DraftAuthor).filter(DraftAuthor.shout == draft_id).delete()
# Добавляем новые связи
for aid in author_ids:
da = DraftAuthor(shout=draft_id, author=aid)
session.add(da)
# Обновляем связи с темами если переданы
if "topic_ids" in filtered_input:
topic_ids = filtered_input.pop("topic_ids")
main_topic_id = filtered_input.pop("main_topic_id", None)
if topic_ids:
# Очищаем текущие связи
session.query(DraftTopic).filter(DraftTopic.shout == draft_id).delete()
# Добавляем новые связи
for tid in topic_ids:
dt = DraftTopic(
shout=draft_id,
topic=tid,
main=(tid == main_topic_id) if main_topic_id else False
)
session.add(dt)
# Генерируем SEO если не предоставлено
if "seo" not in filtered_input and not draft.seo:
body_src = filtered_input.get("body", draft.body)
lead_src = filtered_input.get("lead", draft.lead)
body_html = wrap_html_fragment(body_src)
lead_html = wrap_html_fragment(lead_src)
try:
body_text = trafilatura.extract(body_html, include_comments=False, include_tables=False) if body_src else None
lead_text = trafilatura.extract(lead_html, include_comments=False, include_tables=False) if lead_src else None
body_teaser = generate_teaser(body_text, 300) if body_text else ""
filtered_input["seo"] = lead_text if lead_text else body_teaser
except Exception as e:
logger.warning(f"Failed to generate SEO for draft {draft_id}: {e}")
# Обновляем основные поля черновика
for key, value in filtered_input.items():
setattr(draft, key, value)
# Обновляем метаданные
draft.updated_at = int(time.time())
draft.updated_by = author_id draft.updated_by = author_id
session.commit() session.commit()
return {"draft": draft}
# Преобразуем объект в словарь для ответа
draft_dict = draft.dict()
draft_dict["topics"] = [topic.dict() for topic in draft.topics]
draft_dict["authors"] = [author.dict() for author in draft.authors]
# Добавляем объект автора в updated_by
draft_dict["updated_by"] = author_dict
return {"draft": draft_dict}
except Exception as e:
logger.error(f"Failed to update draft: {e}", exc_info=True)
return {"error": f"Failed to update draft: {str(e)}"}
@mutation.field("delete_draft") @mutation.field("delete_draft")
@@ -340,136 +200,182 @@ async def delete_draft(_, info, draft_id: int):
return {"draft": draft} return {"draft": draft}
def validate_html_content(html_content: str) -> tuple[bool, str]:
"""
Проверяет валидность HTML контента через trafilatura.
Args:
html_content: HTML строка для проверки
Returns:
tuple[bool, str]: (валидность, сообщение об ошибке)
Example:
>>> is_valid, error = validate_html_content("<p>Valid HTML</p>")
>>> is_valid
True
>>> error
''
>>> is_valid, error = validate_html_content("Invalid < HTML")
>>> is_valid
False
>>> 'Invalid HTML' in error
True
"""
if not html_content or not html_content.strip():
return False, "Content is empty"
try:
html_content = wrap_html_fragment(html_content)
extracted = trafilatura.extract(html_content)
if not extracted:
return False, "Invalid HTML structure or empty content"
return True, ""
except Exception as e:
logger.error(f"HTML validation error: {e}", exc_info=True)
return False, f"Invalid HTML content: {str(e)}"
@mutation.field("publish_draft") @mutation.field("publish_draft")
@login_required @login_required
async def publish_draft(_, info, draft_id: int): async def publish_draft(_, info, draft_id: int):
""" user_id = info.context.get("user_id")
Публикует черновик, создавая новый Shout или обновляя существующий. author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
if not user_id or not author_id:
return {"error": "User ID and author ID are required"}
with local_session() as session:
draft = session.query(Draft).filter(Draft.id == draft_id).first()
if not draft:
return {"error": "Draft not found"}
shout = create_shout_from_draft(session, draft, author_id)
session.add(shout)
session.commit()
return {"shout": shout, "draft": draft}
@mutation.field("unpublish_draft")
@login_required
async def unpublish_draft(_, info, draft_id: int):
user_id = info.context.get("user_id")
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
if not user_id or not author_id:
return {"error": "User ID and author ID are required"}
with local_session() as session:
draft = session.query(Draft).filter(Draft.id == draft_id).first()
if not draft:
return {"error": "Draft not found"}
shout = session.query(Shout).filter(Shout.draft == draft.id).first()
if shout:
shout.published_at = None
session.commit()
return {"shout": shout, "draft": draft}
return {"error": "Failed to unpublish draft"}
@mutation.field("publish_shout")
@login_required
async def publish_shout(_, info, shout_id: int):
"""Publish draft as a shout or update existing shout.
Args: Args:
draft_id (int): ID черновика для публикации shout_id: ID существующей публикации или 0 для новой
draft: Объект черновика (опционально)
Returns:
dict: Результат публикации с shout или сообщением об ошибке
""" """
user_id = info.context.get("user_id") user_id = info.context.get("user_id")
author_dict = info.context.get("author", {}) author_dict = info.context.get("author", {})
author_id = author_dict.get("id") author_id = author_dict.get("id")
now = int(time.time())
if not user_id or not author_id: if not user_id or not author_id:
return {"error": "Author ID is required"} return {"error": "User ID and author ID are required"}
try: try:
with local_session() as session: with local_session() as session:
# Загружаем черновик со всеми связями shout = session.query(Shout).filter(Shout.id == shout_id).first()
draft = ( if not shout:
session.query(Draft) return {"error": "Shout not found"}
.options( was_published = shout.published_at is not None
joinedload(Draft.topics), draft = session.query(Draft).where(Draft.id == shout.draft).first()
joinedload(Draft.authors),
joinedload(Draft.publication)
)
.filter(Draft.id == draft_id)
.first()
)
if not draft: if not draft:
return {"error": "Draft not found"} return {"error": "Draft not found"}
# Находим черновик если не передан
# Проверка валидности HTML в body if not shout:
is_valid, error = validate_html_content(draft.body)
if not is_valid:
return {"error": f"Cannot publish draft: {error}"}
# Проверяем, есть ли уже публикация для этого черновика
if draft.publication:
shout = draft.publication
# Обновляем существующую публикацию
for field in ["body", "title", "subtitle", "lead", "cover", "cover_caption", "media", "lang", "seo"]:
if hasattr(draft, field):
setattr(shout, field, getattr(draft, field))
shout.updated_at = int(time.time())
shout.updated_by = author_id
else:
# Создаем новую публикацию
shout = create_shout_from_draft(session, draft, author_id) shout = create_shout_from_draft(session, draft, author_id)
now = int(time.time()) else:
shout.created_at = now # Обновляем существующую публикацию
shout.draft = draft.id
shout.created_by = author_id
shout.title = draft.title
shout.subtitle = draft.subtitle
shout.body = draft.body
shout.cover = draft.cover
shout.cover_caption = draft.cover_caption
shout.lead = draft.lead
shout.layout = draft.layout
shout.media = draft.media
shout.lang = draft.lang
shout.seo = draft.seo
draft.updated_at = now
shout.updated_at = now
# Устанавливаем published_at только если была ранее снята с публикации
if not was_published:
shout.published_at = now shout.published_at = now
session.add(shout)
session.flush() # Получаем ID нового шаута
# Очищаем существующие связи # Обрабатываем связи с авторами
session.query(ShoutAuthor).filter(ShoutAuthor.shout == shout.id).delete() if (
session.query(ShoutTopic).filter(ShoutTopic.shout == shout.id).delete() not session.query(ShoutAuthor)
.filter(and_(ShoutAuthor.shout == shout.id, ShoutAuthor.author == author_id))
# Добавляем авторов .first()
for author in (draft.authors or []): ):
sa = ShoutAuthor(shout=shout.id, author=author.id) sa = ShoutAuthor(shout=shout.id, author=author_id)
session.add(sa) session.add(sa)
# Добавляем темы # Обрабатываем темы
for topic in (draft.topics or []): if draft.topics:
for topic in draft.topics:
st = ShoutTopic( st = ShoutTopic(
topic=topic.id, topic=topic.id, shout=shout.id, main=topic.main if hasattr(topic, "main") else False
shout=shout.id,
main=topic.main if hasattr(topic, "main") else False
) )
session.add(st) session.add(st)
session.commit() session.add(shout)
session.add(draft)
session.flush()
# Инвалидируем кеш # Инвалидируем кэш только если это новая публикация или была снята с публикации
invalidate_shouts_cache() if not was_published:
invalidate_shout_related_cache(shout.id) cache_keys = ["feed", f"author_{author_id}", "random_top", "unrated"]
# Уведомляем о публикации # Добавляем ключи для тем
await notify_shout(shout.id) for topic in shout.topics:
cache_keys.append(f"topic_{topic.id}")
cache_keys.append(f"topic_shouts_{topic.id}")
await cache_by_id(Topic, topic.id, cache_topic)
# Инвалидируем кэш
await invalidate_shouts_cache(cache_keys)
await invalidate_shout_related_cache(shout, author_id)
# Обновляем кэш авторов
for author in shout.authors:
await cache_by_id(Author, author.id, cache_author)
# Отправляем уведомление о публикации
await notify_shout(shout.dict(), "published")
# Обновляем поисковый индекс # Обновляем поисковый индекс
search_service.index_shout(shout) search_service.index(shout)
else:
logger.info(f"Successfully published shout #{shout.id} from draft #{draft_id}") # Для уже опубликованных материалов просто отправляем уведомление об обновлении
logger.debug(f"Shout data: {shout.dict()}") await notify_shout(shout.dict(), "update")
session.commit()
return {"shout": shout} return {"shout": shout}
except Exception as e: except Exception as e:
logger.error(f"Failed to publish draft {draft_id}: {e}", exc_info=True) logger.error(f"Failed to publish shout: {e}", exc_info=True)
return {"error": f"Failed to publish draft: {str(e)}"} if "session" in locals():
session.rollback()
return {"error": f"Failed to publish shout: {str(e)}"}
@mutation.field("unpublish_shout")
@login_required
async def unpublish_shout(_, info, shout_id: int):
"""Unpublish a shout.
Args:
shout_id: The ID of the shout to unpublish
Returns:
dict: The unpublished shout or an error message
"""
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
if not author_id:
return {"error": "Author ID is required"}
shout = None
with local_session() as session:
try:
shout = session.query(Shout).filter(Shout.id == shout_id).first()
shout.published_at = None
session.commit()
invalidate_shout_related_cache(shout)
invalidate_shouts_cache()
except Exception:
session.rollback()
return {"error": "Failed to unpublish shout"}
return {"shout": shout}


@@ -3,7 +3,7 @@ import time
import orjson import orjson
import trafilatura import trafilatura
from sqlalchemy import and_, desc, select from sqlalchemy import and_, desc, select
from sqlalchemy.orm import joinedload, selectinload from sqlalchemy.orm import joinedload
from sqlalchemy.sql.functions import coalesce from sqlalchemy.sql.functions import coalesce
from cache.cache import ( from cache.cache import (
@@ -13,7 +13,6 @@ from cache.cache import (
invalidate_shouts_cache, invalidate_shouts_cache,
) )
from orm.author import Author from orm.author import Author
from orm.draft import Draft
from orm.shout import Shout, ShoutAuthor, ShoutTopic from orm.shout import Shout, ShoutAuthor, ShoutTopic
from orm.topic import Topic from orm.topic import Topic
from resolvers.follower import follow, unfollow from resolvers.follower import follow, unfollow
@@ -21,9 +20,8 @@ from resolvers.stat import get_with_stat
from services.auth import login_required from services.auth import login_required
from services.db import local_session from services.db import local_session
from services.notify import notify_shout from services.notify import notify_shout
from services.schema import mutation, query from services.schema import query
from services.search import search_service from services.search import search_service
from utils.html_wrapper import wrap_html_fragment
from utils.logger import root_logger as logger from utils.logger import root_logger as logger
@@ -181,11 +179,9 @@ async def create_shout(_, info, inp):
# Создаем публикацию без topics # Создаем публикацию без topics
body = inp.get("body", "") body = inp.get("body", "")
lead = inp.get("lead", "") lead = inp.get("lead", "")
body_html = wrap_html_fragment(body) body_text = trafilatura.extract(body)
lead_html = wrap_html_fragment(lead) lead_text = trafilatura.extract(lead)
body_text = trafilatura.extract(body_html) seo = inp.get("seo", lead_text or body_text[:300].split(". ")[:-1].join(". "))
lead_text = trafilatura.extract(lead_html)
seo = inp.get("seo", lead_text.strip() or body_text.strip()[:300].split(". ")[:-1].join(". "))
new_shout = Shout( new_shout = Shout(
slug=slug, slug=slug,
body=body, body=body,
@@ -649,18 +645,15 @@ def get_main_topic(topics):
"""Get the main topic from a list of ShoutTopic objects.""" """Get the main topic from a list of ShoutTopic objects."""
logger.info(f"Starting get_main_topic with {len(topics) if topics else 0} topics") logger.info(f"Starting get_main_topic with {len(topics) if topics else 0} topics")
logger.debug( logger.debug(
f"Topics data: {[(t.slug, getattr(t, 'main', False)) for t in topics] if topics else []}" f"Topics data: {[(t.topic.slug if t.topic else 'no-topic', t.main) for t in topics] if topics else []}"
) )
if not topics: if not topics:
logger.warning("No topics provided to get_main_topic") logger.warning("No topics provided to get_main_topic")
return {"id": 0, "title": "no topic", "slug": "notopic", "is_main": True} return {"id": 0, "title": "no topic", "slug": "notopic", "is_main": True}
# Проверяем, является ли topics списком объектов ShoutTopic или Topic
if hasattr(topics[0], 'topic') and topics[0].topic:
# Для ShoutTopic объектов (старый формат)
# Find first main topic in original order # Find first main topic in original order
main_topic_rel = next((st for st in topics if getattr(st, 'main', False)), None) main_topic_rel = next((st for st in topics if st.main), None)
logger.debug( logger.debug(
f"Found main topic relation: {main_topic_rel.topic.slug if main_topic_rel and main_topic_rel.topic else None}" f"Found main topic relation: {main_topic_rel.topic.slug if main_topic_rel and main_topic_rel.topic else None}"
) )
@@ -685,142 +678,6 @@ def get_main_topic(topics):
"is_main": True, "is_main": True,
} }
return result return result
else:
# Для Topic объектов (новый формат из selectinload)
# После смены на selectinload у нас просто список Topic объектов
if topics:
logger.info(f"Using first topic as main: {topics[0].slug}")
result = {
"slug": topics[0].slug,
"title": topics[0].title,
"id": topics[0].id,
"is_main": True,
}
return result
logger.warning("No valid topics found, returning default") logger.warning("No valid topics found, returning default")
return {"slug": "notopic", "title": "no topic", "id": 0, "is_main": True} return {"slug": "notopic", "title": "no topic", "id": 0, "is_main": True}
@mutation.field("unpublish_shout")
@login_required
async def unpublish_shout(_, info, shout_id: int):
"""Снимает публикацию (shout) с публикации.
Предзагружает связанный черновик (draft) и его авторов/темы, чтобы избежать
ошибок при последующем доступе к ним в GraphQL.
Args:
shout_id: ID публикации для снятия с публикации
Returns:
dict: Снятая с публикации публикация или сообщение об ошибке
"""
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
if not author_id:
# В идеале нужна проверка прав, имеет ли автор право снимать публикацию
return {"error": "Author ID is required"}
shout = None
with local_session() as session:
try:
# Загружаем Shout со всеми связями для правильного формирования ответа
shout = (
session.query(Shout)
.options(
joinedload(Shout.authors),
selectinload(Shout.topics)
)
.filter(Shout.id == shout_id)
.first()
)
if not shout:
logger.warning(f"Shout not found for unpublish: ID {shout_id}")
return {"error": "Shout not found"}
# Если у публикации есть связанный черновик, загружаем его с relationships
if shout.draft:
# Отдельно загружаем черновик с его связями
draft = (
session.query(Draft)
.options(
selectinload(Draft.authors),
selectinload(Draft.topics)
)
.filter(Draft.id == shout.draft)
.first()
)
# Связываем черновик с публикацией вручную для доступа через API
if draft:
shout.draft_obj = draft
# TODO: Добавить проверку прав доступа, если необходимо
# if author_id not in [a.id for a in shout.authors]: # Требует selectinload(Shout.authors) выше
# logger.warning(f"Author {author_id} denied unpublishing shout {shout_id}")
# return {"error": "Access denied"}
# Запоминаем старый slug и id для формирования поля publication
shout_slug = shout.slug
shout_id_for_publication = shout.id
# Снимаем с публикации (устанавливаем published_at в None)
shout.published_at = None
session.commit()
# Формируем полноценный словарь для ответа
shout_dict = shout.dict()
# Добавляем связанные данные
shout_dict["topics"] = (
[
{"id": topic.id, "slug": topic.slug, "title": topic.title}
for topic in shout.topics
]
if shout.topics
else []
)
# Добавляем main_topic
shout_dict["main_topic"] = get_main_topic(shout.topics)
# Добавляем авторов
shout_dict["authors"] = (
[
{"id": author.id, "name": author.name, "slug": author.slug}
for author in shout.authors
]
if shout.authors
else []
)
# Важно! Обновляем поле publication, отражая состояние "снят с публикации"
shout_dict["publication"] = {
"id": shout_id_for_publication,
"slug": shout_slug,
"published_at": None # Ключевое изменение - устанавливаем published_at в None
}
# Инвалидация кэша
try:
cache_keys = [
"feed", # лента
f"author_{author_id}", # публикации автора
"random_top", # случайные топовые
"unrated", # неоцененные
]
await invalidate_shout_related_cache(shout, author_id)
await invalidate_shouts_cache(cache_keys)
logger.info(f"Cache invalidated after unpublishing shout {shout_id}")
except Exception as cache_err:
logger.error(f"Failed to invalidate cache for unpublish shout {shout_id}: {cache_err}")
except Exception as e:
session.rollback()
logger.error(f"Failed to unpublish shout {shout_id}: {e}", exc_info=True)
return {"error": f"Failed to unpublish shout: {str(e)}"}
# Возвращаем сформированный словарь вместо объекта
logger.info(f"Shout {shout_id} unpublished successfully by author {author_id}")
return {"shout": shout_dict}


@@ -488,15 +488,11 @@ def apply_reaction_filters(by, q):
     if shout_slug:
         q = q.filter(Shout.slug == shout_slug)

-    shout_id = by.get("shout_id")
-    if shout_id:
-        q = q.filter(Shout.id == shout_id)
-
     shouts = by.get("shouts")
     if shouts:
         q = q.filter(Shout.slug.in_(shouts))

-    created_by = by.get("created_by", by.get("author_id"))
+    created_by = by.get("created_by")
     if created_by:
         q = q.filter(Author.id == created_by)


@@ -396,25 +396,38 @@ async def load_shouts_search(_, info, text, options):
# Get search results with pagination # Get search results with pagination
results = await search_text(text, limit, offset) results = await search_text(text, limit, offset)
# If no results, return empty list
if not results: if not results:
logger.info(f"No search results found for '{text}'") logger.info(f"No search results found for '{text}'")
return [] return []
# Extract IDs in the order from the search engine # Extract IDs and scores
hits_ids = [str(sr.get("id")) for sr in results if sr.get("id")] scores = {}
hits_ids = []
for sr in results:
shout_id = sr.get("id")
if shout_id:
shout_id = str(shout_id)
scores[shout_id] = sr.get("score")
hits_ids.append(shout_id)
# Query DB for only the IDs in the current page # Query DB for only the IDs in the current page
q = query_with_stat(info) q = query_with_stat(info)
q = q.filter(Shout.id.in_(hits_ids)) q = q.filter(Shout.id.in_(hits_ids))
q = apply_filters(q, options.get("filters", {})) q = apply_filters(q, options.get("filters", {}))
#
shouts = get_shouts_with_links(info, q, len(hits_ids), 0) shouts = get_shouts_with_links(info, q, len(hits_ids), 0)
# Reorder shouts to match the order from hits_ids # Add scores from search results
shouts_dict = {str(shout['id']): shout for shout in shouts} for shout in shouts:
ordered_shouts = [shouts_dict[shout_id] for shout_id in hits_ids if shout_id in shouts_dict] shout_id = str(shout['id'])
shout["score"] = scores.get(shout_id, 0)
return ordered_shouts # Re-sort by search score to maintain ranking
shouts.sort(key=lambda x: scores.get(str(x['id']), 0), reverse=True)
return shouts
return [] return []


@@ -10,7 +10,6 @@ from cache.cache import (
 )
 from orm.author import Author
 from orm.topic import Topic
-from orm.reaction import ReactionKind
 from resolvers.stat import get_with_stat
 from services.auth import login_required
 from services.db import local_session
@@ -113,7 +112,7 @@ async def get_topics_with_stats(limit=100, offset=0, community_id=None, by=None)
     shouts_stats_query = f"""
         SELECT st.topic, COUNT(DISTINCT s.id) as shouts_count
         FROM shout_topic st
-        JOIN shout s ON st.shout = s.id AND s.deleted_at IS NULL AND s.published_at IS NOT NULL
+        JOIN shout s ON st.shout = s.id AND s.deleted_at IS NULL
         WHERE st.topic IN ({",".join(map(str, topic_ids))})
         GROUP BY st.topic
     """
@@ -122,7 +121,7 @@ async def get_topics_with_stats(limit=100, offset=0, community_id=None, by=None)
     # Запрос на получение статистики по подписчикам для выбранных тем
     followers_stats_query = f"""
         SELECT topic, COUNT(DISTINCT follower) as followers_count
-        FROM topic_followers tf
+        FROM topic_followers
         WHERE topic IN ({",".join(map(str, topic_ids))})
         GROUP BY topic
     """
@@ -144,8 +143,7 @@ async def get_topics_with_stats(limit=100, offset=0, community_id=None, by=None)
         SELECT st.topic, COUNT(DISTINCT r.id) as comments_count
         FROM shout_topic st
         JOIN shout s ON st.shout = s.id AND s.deleted_at IS NULL AND s.published_at IS NOT NULL
-        JOIN reaction r ON r.shout = s.id AND r.kind = '{ReactionKind.COMMENT.value}' AND r.deleted_at IS NULL
-        JOIN author a ON r.created_by = a.id AND a.deleted_at IS NULL
+        JOIN reaction r ON r.shout = s.id
         WHERE st.topic IN ({",".join(map(str, topic_ids))})
         GROUP BY st.topic
     """


@@ -92,14 +92,12 @@ input LoadShoutsOptions {
 input ReactionBy {
     shout: String
-    shout_id: Int
     shouts: [String]
     search: String
     kinds: [ReactionKind]
     reply_to: Int # filter
     topic: String
     created_by: Int
-    author_id: Int
     author: String
     after: Int
     sort: ReactionSort # sort


@@ -4,7 +4,7 @@ type Query {
     get_author_id(user: String!): Author
     get_authors_all: [Author]
     load_authors_by(by: AuthorsBy!, limit: Int, offset: Int): [Author]
-    load_authors_search(text: String!, limit: Int, offset: Int): [Author!] # Search for authors by name or bio
+    # search_authors(what: String!): [Author]

     # community
     get_community: Community


@@ -107,12 +107,6 @@ type Shout {
     score: Float
 }

-type PublicationInfo {
-    id: Int!
-    slug: String!
-    published_at: Int
-}
-
 type Draft {
     id: Int!
     created_at: Int!
@@ -135,9 +129,9 @@ type Draft {
     deleted_at: Int
     updated_by: Author
     deleted_by: Author
-    authors: [Author]!
-    topics: [Topic]!
-    publication: PublicationInfo
+    authors: [Author]
+    topics: [Topic]
 }

 type Stat {

server.py (new file)

@@ -0,0 +1,34 @@
import sys
from pathlib import Path
from granian.constants import Interfaces
from granian.log import LogLevels
from granian.server import Server
from settings import PORT
from utils.logger import root_logger as logger
if __name__ == "__main__":
logger.info("started")
try:
granian_instance = Server(
"main:app",
address="0.0.0.0",
port=PORT,
interface=Interfaces.ASGI,
workers=1,
websockets=False,
log_level=LogLevels.debug,
backlog=2048,
)
if "dev" in sys.argv:
logger.info("dev mode, building ssl context")
granian_instance.build_ssl_context(cert=Path("localhost.pem"), key=Path("localhost-key.pem"), password=None)
granian_instance.serve()
except Exception as error:
logger.error(error, exc_info=True)
raise
finally:
logger.info("stopped")


@@ -19,7 +19,7 @@ from sqlalchemy import (
inspect, inspect,
text, text,
) )
from sqlalchemy.orm import Session, configure_mappers, declarative_base, joinedload from sqlalchemy.orm import Session, configure_mappers, declarative_base
from sqlalchemy.sql.schema import Table from sqlalchemy.sql.schema import Table
from settings import DB_URL from settings import DB_URL
@@ -260,11 +260,8 @@ def get_json_builder():
# Используем их в коде # Используем их в коде
json_builder, json_array_builder, json_cast = get_json_builder() json_builder, json_array_builder, json_cast = get_json_builder()
# Fetch all shouts, with authors preloaded
# This function is used for search indexing
async def fetch_all_shouts(session=None): async def fetch_all_shouts(session=None):
"""Fetch all published shouts for search indexing with authors preloaded""" """Fetch all published shouts for search indexing"""
from orm.shout import Shout from orm.shout import Shout
close_session = False close_session = False
@@ -273,10 +270,8 @@ async def fetch_all_shouts(session=None):
close_session = True close_session = True
try: try:
# Fetch only published and non-deleted shouts with authors preloaded # Fetch only published and non-deleted shouts
query = session.query(Shout).options( query = session.query(Shout).filter(
joinedload(Shout.authors)
).filter(
Shout.published_at.is_not(None), Shout.published_at.is_not(None),
Shout.deleted_at.is_(None) Shout.deleted_at.is_(None)
) )


@@ -53,66 +53,3 @@ async def notify_follower(follower: dict, author_id: int, action: str = "follow"
except Exception as e: except Exception as e:
# Log the error and re-raise it # Log the error and re-raise it
logger.error(f"Failed to publish to channel {channel_name}: {e}") logger.error(f"Failed to publish to channel {channel_name}: {e}")
async def notify_draft(draft_data, action: str = "publish"):
"""
Отправляет уведомление о публикации или обновлении черновика.
Функция гарантирует, что данные черновика сериализуются корректно, включая
связанные атрибуты (topics, authors).
Args:
draft_data (dict): Словарь с данными черновика. Должен содержать минимум id и title
action (str, optional): Действие ("publish", "update"). По умолчанию "publish"
Returns:
None
Examples:
>>> draft = {"id": 1, "title": "Тестовый черновик", "slug": "test-draft"}
>>> await notify_draft(draft, "publish")
"""
channel_name = "draft"
try:
# Убеждаемся, что все необходимые данные присутствуют
# и объект не требует доступа к отсоединенным атрибутам
if isinstance(draft_data, dict):
draft_payload = draft_data
else:
# Если это ORM объект, преобразуем его в словарь с нужными атрибутами
draft_payload = {
"id": getattr(draft_data, "id", None),
"slug": getattr(draft_data, "slug", None),
"title": getattr(draft_data, "title", None),
"subtitle": getattr(draft_data, "subtitle", None),
"media": getattr(draft_data, "media", None),
"created_at": getattr(draft_data, "created_at", None),
"updated_at": getattr(draft_data, "updated_at", None)
}
# Если переданы связанные атрибуты, добавим их
if hasattr(draft_data, "topics") and draft_data.topics is not None:
draft_payload["topics"] = [
{"id": t.id, "name": t.name, "slug": t.slug}
for t in draft_data.topics
]
if hasattr(draft_data, "authors") and draft_data.authors is not None:
draft_payload["authors"] = [
{"id": a.id, "name": a.name, "slug": a.slug, "pic": getattr(a, "pic", None)}
for a in draft_data.authors
]
data = {"payload": draft_payload, "action": action}
# Сохраняем уведомление
save_notification(action, channel_name, data.get("payload"))
# Публикуем в Redis
json_data = orjson.dumps(data)
if json_data:
await redis.publish(channel_name, json_data)
except Exception as e:
logger.error(f"Failed to publish to channel {channel_name}: {e}")


@@ -1,15 +1,14 @@
from asyncio.log import logger from asyncio.log import logger
import httpx import httpx
from ariadne import MutationType, ObjectType, QueryType from ariadne import MutationType, QueryType
from services.db import create_table_if_not_exists, local_session from services.db import create_table_if_not_exists, local_session
from settings import AUTH_URL from settings import AUTH_URL
query = QueryType() query = QueryType()
mutation = MutationType() mutation = MutationType()
type_draft = ObjectType("Draft") resolvers = [query, mutation]
resolvers = [query, mutation, type_draft]
async def request_graphql_data(gql, url=AUTH_URL, headers=None): async def request_graphql_data(gql, url=AUTH_URL, headers=None):
@@ -29,19 +28,12 @@ async def request_graphql_data(gql, url=AUTH_URL, headers=None):
async with httpx.AsyncClient() as client: async with httpx.AsyncClient() as client:
response = await client.post(url, json=gql, headers=headers) response = await client.post(url, json=gql, headers=headers)
if response.status_code == 200: if response.status_code == 200:
# Check if the response has content before parsing
if response.content and len(response.content.strip()) > 0:
try:
data = response.json() data = response.json()
errors = data.get("errors") errors = data.get("errors")
if errors: if errors:
logger.error(f"{url} response: {data}") logger.error(f"{url} response: {data}")
else: else:
return data return data
except Exception as json_err:
logger.error(f"JSON decode error: {json_err}, Response content: {response.text[:100]}")
else:
logger.error(f"{url}: Response is empty")
else: else:
logger.error(f"{url}: {response.status_code} {response.text}") logger.error(f"{url}: {response.status_code} {response.text}")
except Exception as _e: except Exception as _e:

(One file's diff is omitted here because it is too large to display.)


@@ -1 +0,0 @@


@@ -1,38 +0,0 @@
"""
Модуль для обработки HTML-фрагментов
"""
def wrap_html_fragment(fragment: str) -> str:
"""
Оборачивает HTML-фрагмент в полную HTML-структуру для корректной обработки.
Args:
fragment: HTML-фрагмент для обработки
Returns:
str: Полный HTML-документ
Example:
>>> wrap_html_fragment("<p>Текст параграфа</p>")
'<!DOCTYPE html><html><head><meta charset="utf-8"></head><body><p>Текст параграфа</p></body></html>'
"""
if not fragment or not fragment.strip():
return fragment
# Проверяем, является ли контент полным HTML-документом
is_full_html = fragment.strip().startswith('<!DOCTYPE') or fragment.strip().startswith('<html')
# Если это фрагмент, оборачиваем его в полный HTML-документ
if not is_full_html:
return f"""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title></title>
</head>
<body>
{fragment}
</body>
</html>"""
return fragment