diff --git a/cache/cache.py b/cache/cache.py
index 1fab2140..faaf0bd8 100644
--- a/cache/cache.py
+++ b/cache/cache.py
@@ -177,7 +177,7 @@ async def get_cached_authors_by_ids(author_ids: List[int]) -> List[dict]:
     missing_ids = [author_ids[index] for index in missing_indices]
     with local_session() as session:
         query = select(Author).where(Author.id.in_(missing_ids))
-        missing_authors = session.execute(query).scalars().all()
+        missing_authors = session.execute(query).scalars().unique().all()
         await asyncio.gather(*(cache_author(author.dict()) for author in missing_authors))
         for index, author in zip(missing_indices, missing_authors):
             authors[index] = author.dict()
diff --git a/resolvers/author.py b/resolvers/author.py
index 53d9ba0b..80177416 100644
--- a/resolvers/author.py
+++ b/resolvers/author.py
@@ -47,7 +47,7 @@ async def get_all_authors(current_user_id=None):
     with local_session() as session:
         # Запрос на получение базовой информации об авторах
         authors_query = select(Author).where(Author.deleted_at.is_(None))
-        authors = session.execute(authors_query).scalars().all()
+        authors = session.execute(authors_query).scalars().unique().all()
 
         # Преобразуем авторов в словари с учетом прав доступа
         return [author.dict(current_user_id, False) for author in authors]
@@ -174,7 +174,7 @@ async def get_authors_with_stats(limit=50, offset=0, by: Optional[str] = None, c
         base_query = base_query.limit(limit).offset(offset)
 
         # Получаем авторов
-        authors = session.execute(base_query).scalars().all()
+        authors = session.execute(base_query).scalars().unique().all()
         author_ids = [author.id for author in authors]
 
         if not author_ids:
@@ -418,7 +418,7 @@ async def load_authors_search(_, info, text: str, limit: int = 10, offset: int =
     with local_session() as session:
         # Simple query to get authors by IDs - no need for stats here
        authors_query = select(Author).filter(Author.id.in_(author_ids))
-        db_authors = session.execute(authors_query).scalars().all()
+        db_authors = session.execute(authors_query).scalars().unique().all()
 
         if not db_authors:
             return []
diff --git a/resolvers/reaction.py b/resolvers/reaction.py
index e31711b2..02d4c028 100644
--- a/resolvers/reaction.py
+++ b/resolvers/reaction.py
@@ -79,7 +79,9 @@ def get_reactions_with_stat(q, limit=10, offset=0):
     >>> get_reactions_with_stat(q, 10, 0)  # doctest: +SKIP
     [{'id': 1, 'body': 'Текст комментария', 'stat': {'rating': 5, 'comments_count': 3}, ...}]
     """
-    q = q.distinct().limit(limit).offset(offset)
+    # Убираем distinct() поскольку GROUP BY уже обеспечивает уникальность,
+    # а distinct() вызывает ошибку PostgreSQL с JSON полями
+    q = q.limit(limit).offset(offset)
 
     reactions = []
     with local_session() as session:
diff --git a/resolvers/topic.py b/resolvers/topic.py
index 8e2fa642..a329dbaf 100644
--- a/resolvers/topic.py
+++ b/resolvers/topic.py
@@ -37,7 +37,7 @@ async def get_all_topics():
     with local_session() as session:
         # Запрос на получение базовой информации о темах
         topics_query = select(Topic)
-        topics = session.execute(topics_query).scalars().all()
+        topics = session.execute(topics_query).scalars().unique().all()
 
         # Преобразуем темы в словари
         return [topic.dict() for topic in topics]
@@ -103,7 +103,7 @@ async def get_topics_with_stats(limit=100, offset=0, community_id=None, by=None)
         base_query = base_query.limit(limit).offset(offset)
 
         # Получаем темы
-        topics = session.execute(base_query).scalars().all()
+        topics = session.execute(base_query).scalars().unique().all()
         topic_ids = [topic.id for topic in topics]
 
         if not topic_ids: