Compare commits

dev..feature/caching-improved

No commits in common. "dev" and "feature/caching-improved" have entirely different histories.

41 changed files with 633 additions and 1684 deletions

.gitignore (vendored)

@@ -161,6 +161,4 @@ views.json
 *.key
 *.crt
 *cache.json
 .cursor
-node_modules/

CHANGELOG.md

@@ -1,44 +1,3 @@
-#### [0.4.20] - 2025-05-03
-- Fixed a bug in the `CacheRevalidationManager` class: added initialization of the `_redis` attribute
-- Improved Redis connection handling in the cache revalidation manager:
-  - Automatic connection recovery when the connection is lost
-  - Connection check before performing cache operations
-  - Extra logging to simplify troubleshooting
-- Fixed the `unpublish_shout` resolver:
-  - Correctly builds the synthetic `publication` field with `published_at: null`
-  - Returns a complete data dictionary instead of a model object
-  - Improved loading of related data (authors, topics) to build the response correctly
-#### [0.4.19] - 2025-04-14
-- dropped `Shout.description` and `Draft.description` to be UX-generated
-- use redis to init views counters after migrator
-#### [0.4.18] - 2025-04-10
-- Fixed `Topic.stat.authors` and `Topic.stat.comments`
-- Fixed unique constraint violation for empty slug values:
-  - Modified `update_draft` resolver to handle empty slug values
-  - Modified `create_draft` resolver to prevent empty slug values
-  - Added validation to prevent inserting or updating drafts with empty slug
-  - Fixed database error "duplicate key value violates unique constraint draft_slug_key"
-#### [0.4.17] - 2025-03-26
-- Fixed `'Reaction' object is not subscriptable` error in hierarchical comments:
-  - Modified `get_reactions_with_stat()` to convert Reaction objects to dictionaries
-  - Added default values for limit/offset parameters
-  - Fixed `load_first_replies()` implementation with proper parameter passing
-  - Added doctest with example usage
-  - Limited child comments to 100 per parent for performance
-#### [0.4.16] - 2025-03-22
-- Added hierarchical comments pagination:
-  - Created new GraphQL query `load_comments_branch` for efficient loading of hierarchical comments
-  - Ability to load root comments with their first N replies
-  - Added pagination for both root and child comments
-  - Using existing `comments_count` field in `Stat` type to display number of replies
-  - Added special `first_replies` field to store first replies to a comment
-  - Optimized SQL queries for efficient loading of comment hierarchies
-  - Implemented flexible comment sorting system (by time, rating)
 #### [0.4.15] - 2025-03-22
 - Upgraded caching system described in `docs/caching.md`
 - Module `cache/memorycache.py` removed
@@ -72,7 +31,8 @@
 - Implemented persistent Redis caching for author queries without TTL (invalidated only on changes)
 - Optimized author retrieval with separate endpoints:
   - `get_authors_all` - returns all non-deleted authors without statistics
-  - `load_authors_by` - optimized to use caching and efficient sorting and pagination
+  - `get_authors_paginated` - returns authors with statistics and pagination support
+  - `load_authors_by` - optimized to use caching and efficient sorting
 - Improved SQL queries with optimized JOIN conditions and efficient filtering
 - Added pre-aggregation of statistics (shouts count, followers count) in single efficient queries
 - Implemented robust cache invalidation on author updates
@@ -84,6 +44,7 @@
 - Implemented persistent Redis caching for topic queries (no TTL, invalidated only on changes)
 - Optimized topic retrieval with separate endpoints for different use cases:
   - `get_topics_all` - returns all topics without statistics for lightweight listing
+  - `get_topics_paginated` - returns topics with statistics and pagination support
   - `get_topics_by_community` - adds pagination and optimized filtering by community
 - Added SQLAlchemy-managed indexes directly in ORM models for automatic schema maintenance
 - Created `sync_indexes()` function for automatic index synchronization during app startup
@@ -181,7 +142,7 @@
 #### [0.4.4]
 - `followers_stat` removed for shout
 - sqlite3 support added
-- `rating_stat` and `comments_count` fixes
+- `rating_stat` and `commented_stat` fixes
 #### [0.4.3]
 - cache reimplemented
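
The reconnection behavior called out in 0.4.20 amounts to a small guard around the Redis client. A minimal sketch of that pattern, assuming a `redis` service object that exposes a `_client` attribute and an async `connect()` method, exactly as in the `cache/revalidator.py` hunk further below:

```python
from utils.logger import root_logger as logger

# Sketch only: mirrors the guard shown in the cache/revalidator.py diff below.
async def ensure_redis_connection(redis) -> bool:
    if redis._client:
        return True
    logger.warning("Redis connection not established. Waiting for connection...")
    try:
        await redis.connect()
        logger.info("Redis connection established")
        return True
    except Exception as e:
        logger.error(f"Failed to connect to Redis: {e}")
        return False
```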

auth/usermodel.py (new file)

@@ -0,0 +1,119 @@
import time

from sqlalchemy import (
    JSON,
    Boolean,
    Column,
    DateTime,
    ForeignKey,
    Integer,
    String,
    func,
)
from sqlalchemy.orm import relationship

from services.db import Base


class Permission(Base):
    __tablename__ = "permission"

    id = Column(String, primary_key=True, unique=True, nullable=False, default=None)
    resource = Column(String, nullable=False)
    operation = Column(String, nullable=False)


class Role(Base):
    __tablename__ = "role"

    id = Column(String, primary_key=True, unique=True, nullable=False, default=None)
    name = Column(String, nullable=False)
    permissions = relationship(Permission)


class AuthorizerUser(Base):
    __tablename__ = "authorizer_users"

    id = Column(String, primary_key=True, unique=True, nullable=False, default=None)
    key = Column(String)
    email = Column(String, unique=True)
    email_verified_at = Column(Integer)
    family_name = Column(String)
    gender = Column(String)
    given_name = Column(String)
    is_multi_factor_auth_enabled = Column(Boolean)
    middle_name = Column(String)
    nickname = Column(String)
    password = Column(String)
    phone_number = Column(String, unique=True)
    phone_number_verified_at = Column(Integer)
    # preferred_username = Column(String, nullable=False)
    picture = Column(String)
    revoked_timestamp = Column(Integer)
    roles = Column(String, default="author,reader")
    signup_methods = Column(String, default="magic_link_login")
    created_at = Column(Integer, default=lambda: int(time.time()))
    updated_at = Column(Integer, default=lambda: int(time.time()))


class UserRating(Base):
    __tablename__ = "user_rating"

    id = None
    rater: Column = Column(ForeignKey("user.id"), primary_key=True, index=True)
    user: Column = Column(ForeignKey("user.id"), primary_key=True, index=True)
    value: Column = Column(Integer)

    @staticmethod
    def init_table():
        pass


class UserRole(Base):
    __tablename__ = "user_role"

    id = None
    user = Column(ForeignKey("user.id"), primary_key=True, index=True)
    role = Column(ForeignKey("role.id"), primary_key=True, index=True)


class User(Base):
    __tablename__ = "user"

    default_user = None

    email = Column(String, unique=True, nullable=False, comment="Email")
    username = Column(String, nullable=False, comment="Login")
    password = Column(String, nullable=True, comment="Password")
    bio = Column(String, nullable=True, comment="Bio")  # status description
    about = Column(String, nullable=True, comment="About")  # long and formatted
    userpic = Column(String, nullable=True, comment="Userpic")
    name = Column(String, nullable=True, comment="Display name")
    slug = Column(String, unique=True, comment="User's slug")
    links = Column(JSON, nullable=True, comment="Links")
    oauth = Column(String, nullable=True)
    oid = Column(String, nullable=True)
    muted = Column(Boolean, default=False)
    confirmed = Column(Boolean, default=False)
    created_at = Column(DateTime(timezone=True), nullable=False, server_default=func.now(), comment="Created at")
    updated_at = Column(DateTime(timezone=True), nullable=False, server_default=func.now(), comment="Updated at")
    last_seen = Column(DateTime(timezone=True), nullable=False, server_default=func.now(), comment="Was online at")
    deleted_at = Column(DateTime(timezone=True), nullable=True, comment="Deleted at")

    ratings = relationship(UserRating, foreign_keys=UserRating.user)
    roles = relationship(lambda: Role, secondary=UserRole.__tablename__)

    def get_permission(self):
        scope = {}
        for role in self.roles:
            for p in role.permissions:
                if p.resource not in scope:
                    scope[p.resource] = set()
                scope[p.resource].add(p.operation)
        print(scope)
        return scope


# if __name__ == "__main__":
#     print(User.get_permission(user_id=1))
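
Assuming roles and permissions are populated, `get_permission` folds them into a `resource -> set of operations` map. A hypothetical usage sketch (the `user` object is presumed to be loaded inside an active session):

```python
# Hypothetical usage of User.get_permission(); `user` must be attached to a
# live session so that user.roles and role.permissions can be traversed.
scope = user.get_permission()
# scope looks like: {"shout": {"create", "update"}, "topic": {"read"}}
if "update" in scope.get("shout", set()):
    print("this user may edit publications")
```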

cache/cache.py (vendored)

@@ -545,9 +545,8 @@ async def get_cached_data(key: str) -> Optional[Any]:
     try:
         cached_data = await redis.execute("GET", key)
         if cached_data:
-            loaded = orjson.loads(cached_data)
-            logger.debug(f"Данные получены из кеша по ключу {key}: {len(loaded)}")
-            return loaded
+            logger.debug(f"Данные получены из кеша по ключу {key}")
+            return orjson.loads(cached_data)
         return None
     except Exception as e:
         logger.error(f"Ошибка при получении данных из кеша: {e}")
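
The write side of this cache is not shown in the hunk; a minimal counterpart sketch, assuming the same `redis.execute` facade and orjson serialization (the helper name `set_cached_data` is illustrative, not taken from the diff):

```python
from typing import Any

import orjson

# Illustrative write path matching get_cached_data above; `redis` is assumed
# to be the same services.redis facade used in the hunk.
async def set_cached_data(key: str, value: Any) -> None:
    payload = orjson.dumps(value)  # bytes; round-trips via orjson.loads on read
    await redis.execute("SET", key, payload)
```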

cache/revalidator.py (vendored)

@@ -8,7 +8,6 @@ from cache.cache import (
     invalidate_cache_by_prefix,
 )
 from resolvers.stat import get_with_stat
-from services.redis import redis
 from utils.logger import root_logger as logger

 CACHE_REVALIDATION_INTERVAL = 300  # 5 minutes
@@ -22,19 +21,9 @@ class CacheRevalidationManager:
         self.lock = asyncio.Lock()
         self.running = True
         self.MAX_BATCH_SIZE = 10  # Maximum number of items to process one at a time
-        self._redis = redis  # _redis initialization added for access to the Redis client

     async def start(self):
         """Start the background worker for cache revalidation."""
-        # Check that we have a Redis connection
-        if not self._redis._client:
-            logger.warning("Redis connection not established. Waiting for connection...")
-            try:
-                await self._redis.connect()
-                logger.info("Redis connection established for revalidation manager")
-            except Exception as e:
-                logger.error(f"Failed to connect to Redis: {e}")
         self.task = asyncio.create_task(self.revalidate_cache())

     async def revalidate_cache(self):
@@ -50,10 +39,6 @@ class CacheRevalidationManager:
     async def process_revalidation(self):
         """Refresh the cache for all entities that need revalidation."""
-        # Check the Redis connection
-        if not self._redis._client:
-            return  # Exit if the connection could not be established
         async with self.lock:
             # Revalidate the author cache
             if self.items_to_revalidate["authors"]:

cache/triggers.py (vendored)

@@ -88,11 +88,7 @@ def after_reaction_handler(mapper, connection, target):
         with local_session() as session:
             shout = (
                 session.query(Shout)
-                .filter(
-                    Shout.id == shout_id,
-                    Shout.published_at.is_not(None),
-                    Shout.deleted_at.is_(None),
-                )
+                .filter(Shout.id == shout_id, Shout.published_at.is_not(None), Shout.deleted_at.is_(None))
                 .first()
             )
@@ -112,27 +108,15 @@ def events_register():
     event.listen(AuthorFollower, "after_insert", after_follower_handler)
     event.listen(AuthorFollower, "after_update", after_follower_handler)
-    event.listen(
-        AuthorFollower,
-        "after_delete",
-        lambda *args: after_follower_handler(*args, is_delete=True),
-    )
+    event.listen(AuthorFollower, "after_delete", lambda *args: after_follower_handler(*args, is_delete=True))

     event.listen(TopicFollower, "after_insert", after_follower_handler)
     event.listen(TopicFollower, "after_update", after_follower_handler)
-    event.listen(
-        TopicFollower,
-        "after_delete",
-        lambda *args: after_follower_handler(*args, is_delete=True),
-    )
+    event.listen(TopicFollower, "after_delete", lambda *args: after_follower_handler(*args, is_delete=True))

     event.listen(ShoutReactionsFollower, "after_insert", after_follower_handler)
     event.listen(ShoutReactionsFollower, "after_update", after_follower_handler)
-    event.listen(
-        ShoutReactionsFollower,
-        "after_delete",
-        lambda *args: after_follower_handler(*args, is_delete=True),
-    )
+    event.listen(ShoutReactionsFollower, "after_delete", lambda *args: after_follower_handler(*args, is_delete=True))

     event.listen(Reaction, "after_update", mark_for_revalidation)
     event.listen(Author, "after_update", mark_for_revalidation)

docs/caching.md

@@ -147,32 +147,16 @@ await invalidate_topics_cache(456)
 ```python
 class CacheRevalidationManager:
-    def __init__(self, interval=CACHE_REVALIDATION_INTERVAL):
-        # ...
-        self._redis = redis  # Direct reference to the Redis service
-    async def start(self):
-        # Check and establish the Redis connection
-        # ...
+    # ...
     async def process_revalidation(self):
-        # Process the items queued for revalidation
         # ...
     def mark_for_revalidation(self, entity_id, entity_type):
-        # Adds an entity to the revalidation queue
         # ...
 ```
 The revalidation manager runs as an asynchronous background process that periodically (every 5 minutes by default) checks whether there are entities to revalidate.
-**Redis interaction:**
-- CacheRevalidationManager holds a direct reference to the Redis service via the `_redis` attribute
-- On startup it checks the Redis connection and establishes a new one if necessary
-- The connection is checked automatically before every revalidation operation
-- The system restores the connection on its own when it is lost
-**Implementation details:**
+Implementation details:
 - Authors and topics are revalidated one record at a time
 - Shouts and reactions are processed in batches, with a threshold of 10 items
 - When the threshold is reached, the system switches to invalidating whole collections instead of per-item processing
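
To make the threshold behavior concrete: a sketch of the switch-over between per-item revalidation and collection invalidation, with `revalidate_one` and `invalidate_collection` as hypothetical helpers standing in for the manager's real calls:

```python
MAX_BATCH_SIZE = 10  # threshold quoted in the notes above

async def process_entity_batch(entity_type: str, ids: set[int]) -> None:
    # Below the threshold: revalidate each record individually.
    if len(ids) <= MAX_BATCH_SIZE:
        for entity_id in ids:
            await revalidate_one(entity_type, entity_id)  # hypothetical helper
    # Past the threshold: cheaper to drop the whole collection from the cache.
    else:
        await invalidate_collection(entity_type)  # hypothetical helper
    ids.clear()
```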


@@ -1,165 +0,0 @@
# Comments Pagination

## Overview

A branch-based comment pagination system has been implemented that allows nested discussion threads to be loaded and displayed efficiently. Key benefits:

1. Only the comments that are needed are loaded, not the whole tree
2. Lower load on both the server and the client
3. Efficient navigation through large discussions
4. Preloading of the first N replies for a better UX

## API for hierarchical comment loading

### GraphQL query `load_comments_branch`

```graphql
query LoadCommentsBranch(
  $shout: Int!,
  $parentId: Int,
  $limit: Int,
  $offset: Int,
  $sort: ReactionSort,
  $childrenLimit: Int,
  $childrenOffset: Int
) {
  load_comments_branch(
    shout: $shout,
    parent_id: $parentId,
    limit: $limit,
    offset: $offset,
    sort: $sort,
    children_limit: $childrenLimit,
    children_offset: $childrenOffset
  ) {
    id
    body
    created_at
    created_by {
      id
      name
      slug
      pic
    }
    kind
    reply_to
    stat {
      rating
      comments_count
    }
    first_replies {
      id
      body
      created_at
      created_by {
        id
        name
        slug
        pic
      }
      kind
      reply_to
      stat {
        rating
        comments_count
      }
    }
  }
}
```

### Query parameters

| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| shout | Int! | - | ID of the article the comments belong to |
| parent_id | Int | null | ID of the parent comment. If null, root comments are loaded |
| limit | Int | 10 | Maximum number of comments to load |
| offset | Int | 0 | Offset for pagination |
| sort | ReactionSort | newest | Sort order: newest, oldest, like |
| children_limit | Int | 3 | Maximum number of child comments per parent |
| children_offset | Int | 0 | Offset for paginating child comments |

### Response fields

Each comment contains the following main fields:

- `id`: comment ID
- `body`: comment text
- `created_at`: creation time
- `created_by`: author information
- `kind`: reaction kind (COMMENT)
- `reply_to`: parent comment ID (null for root comments)
- `first_replies`: the first N child comments
- `stat`: comment statistics, including:
  - `comments_count`: number of replies to the comment
  - `rating`: comment rating

## Usage examples

### Loading root comments with their first replies

```javascript
const { data } = await client.query({
  query: LOAD_COMMENTS_BRANCH,
  variables: {
    shout: 222,
    limit: 10,
    offset: 0,
    sort: "newest",
    childrenLimit: 3
  }
});
```

### Loading replies to a specific comment

```javascript
const { data } = await client.query({
  query: LOAD_COMMENTS_BRANCH,
  variables: {
    shout: 222,
    parentId: 123, // ID of the comment whose replies we are loading
    limit: 10,
    offset: 0,
    sort: "oldest" // sort replies oldest first
  }
});
```

### Paginating child comments

To load additional replies to a comment:

```javascript
const { data } = await client.query({
  query: LOAD_COMMENTS_BRANCH,
  variables: {
    shout: 222,
    parentId: 123,
    limit: 10,
    offset: 0,
    childrenLimit: 5,
    childrenOffset: 3 // skip the first 3 (already loaded) comments
  }
});
```

## Client implementation recommendations

1. To work efficiently with complex discussion threads:
   - First load only the root comments with their first N replies
   - When there are more replies (`stat.comments_count > first_replies.length`), show a "Show all replies" button
   - On click, load the remaining replies with a query that passes the `parentId`
2. For sorting:
   - Use `newest` by default so fresh discussions surface first
   - Provide a sort toggle for the whole comment tree
   - Reload the data with the new `sort` parameter when the sorting changes
3. For better performance:
   - Cache query results on the client
   - Use optimistic updates when adding or editing comments
   - Load comments in chunks where needed (lazy loading)
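
The same queries can be issued from Python with the `gql` package already listed in requirements.txt; a sketch with a placeholder endpoint URL and a trimmed selection set:

```python
from gql import Client, gql
from gql.transport.aiohttp import AIOHTTPTransport

# Placeholder endpoint; point this at the actual GraphQL URL.
transport = AIOHTTPTransport(url="https://example.com/graphql")
client = Client(transport=transport, fetch_schema_from_transport=False)

LOAD_COMMENTS_BRANCH = gql("""
query ($shout: Int!, $parentId: Int, $limit: Int, $childrenLimit: Int) {
  load_comments_branch(shout: $shout, parent_id: $parentId,
                       limit: $limit, children_limit: $childrenLimit) {
    id
    body
    stat { rating comments_count }
    first_replies { id body }
  }
}
""")

# Root comments for article 222, three preloaded replies each.
result = client.execute(
    LOAD_COMMENTS_BRANCH,
    variable_values={"shout": 222, "limit": 10, "childrenLimit": 3},
)
```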


@@ -34,15 +34,4 @@
 - Supported methods: GET, POST, OPTIONS
 - Credentials support configured
 - Allowed headers: Authorization, Content-Type, X-Requested-With, DNT, Cache-Control
 - Preflight responses are cached for 20 days (1728000 seconds)
-## Branch-based comment pagination
-- Efficient loading of comments that respects their hierarchical structure
-- Separate `load_comments_branch` query for optimized loading of a comment branch
-- Ability to load an article's root comments together with their first replies
-- Flexible pagination for both root and child comments
-- Uses the existing `stat.comments_count` field to display the number of replies to a comment
-- Adds a dedicated `first_replies` field that stores the first replies to a comment
-- Supports several sort orders (newest, oldest, popular)
-- Optimized SQL queries to minimize database load

main.py

@@ -7,7 +7,6 @@ from os.path import exists
 from ariadne import load_schema_from_path, make_executable_schema
 from ariadne.asgi import GraphQL
 from starlette.applications import Starlette
-from starlette.middleware import Middleware
 from starlette.middleware.cors import CORSMiddleware
 from starlette.requests import Request
 from starlette.responses import JSONResponse, Response
@@ -74,24 +73,6 @@ async def graphql_handler(request: Request):
         print(f"GraphQL error: {str(e)}")
         return JSONResponse({"error": str(e)}, status_code=500)

-middleware = [
-    # Start with error handling
-    Middleware(ExceptionHandlerMiddleware),
-    # CORS must come before the other middleware so preflight requests are handled correctly
-    Middleware(
-        CORSMiddleware,
-        allow_origins=[
-            "https://localhost:3000",
-            "https://testing.discours.io",
-            "https://testing3.discours.io",
-            "https://discours.io",
-            "https://new.discours.io",
-        ],
-        allow_methods=["GET", "POST", "OPTIONS"],  # explicitly include OPTIONS
-        allow_headers=["*"],
-        allow_credentials=True,
-    ),
-]

 # Update the route in Starlette
 app = Starlette(
@@ -99,7 +80,6 @@ app = Starlette(
         Route("/", graphql_handler, methods=["GET", "POST"]),
         Route("/new-author", WebhookEndpoint),
     ],
-    middleware=middleware,
     lifespan=lifespan,
     debug=True,
 )


@@ -1,5 +1,5 @@
 log_format custom '$remote_addr - $remote_user [$time_local] "$request" '
-                  'origin=$http_origin status=$status '
+                  'origin=$http_origin allow_origin=$allow_origin status=$status '
                   '"$http_referer" "$http_user_agent"';

 {{ $proxy_settings := "proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection $http_connection; proxy_set_header Host $http_host; proxy_set_header X-Request-Start $msec;" }}
@@ -49,6 +49,34 @@ server {
     {{ $proxy_settings }}
     {{ $gzip_settings }}

+    # Handle CORS for OPTIONS method
+    if ($request_method = 'OPTIONS') {
+        add_header 'Access-Control-Allow-Origin' $allow_origin always;
+        add_header 'Access-Control-Allow-Methods' 'POST, GET, OPTIONS';
+        add_header 'Access-Control-Allow-Headers' 'Content-Type, Authorization' always;
+        add_header 'Access-Control-Allow-Credentials' 'true' always;
+        add_header 'Access-Control-Max-Age' 1728000;
+        add_header 'Content-Type' 'text/plain; charset=utf-8';
+        add_header 'Content-Length' 0;
+        return 204;
+    }
+
+    # Handle CORS for POST method
+    if ($request_method = 'POST') {
+        add_header 'Access-Control-Allow-Origin' $allow_origin always;
+        add_header 'Access-Control-Allow-Methods' 'POST, GET, OPTIONS' always;
+        add_header 'Access-Control-Allow-Headers' 'Content-Type, Authorization' always;
+        add_header 'Access-Control-Allow-Credentials' 'true' always;
+    }
+
+    # Handle CORS for GET method
+    if ($request_method = 'GET') {
+        add_header 'Access-Control-Allow-Origin' $allow_origin always;
+        add_header 'Access-Control-Allow-Methods' 'POST, GET, OPTIONS' always;
+        add_header 'Access-Control-Allow-Headers' 'Content-Type, Authorization' always;
+        add_header 'Access-Control-Allow-Credentials' 'true' always;
+    }
+
     proxy_cache my_cache;
     proxy_cache_revalidate on;
     proxy_cache_min_uses 2;
@@ -67,7 +95,14 @@ server {
     }

     location ~* \.(mp3|wav|ogg|flac|aac|aif|webm)$ {
         proxy_pass http://{{ $.APP }}-{{ $upstream_port }};
+        if ($request_method = 'GET') {
+            add_header 'Access-Control-Allow-Origin' $allow_origin always;
+            add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always;
+            add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization' always;
+            add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range' always;
+            add_header 'Access-Control-Allow-Credentials' 'true' always;
+        }
     }
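
One way to sanity-check the CORS blocks above is a preflight request from Python; a sketch using the `requests` library, with placeholder URL and origin:

```python
import requests

# Send a CORS preflight and inspect the headers the OPTIONS branch should add.
resp = requests.options(
    "https://example.com/",  # placeholder host served by this nginx config
    headers={
        "Origin": "https://new.discours.io",
        "Access-Control-Request-Method": "POST",
    },
    timeout=10,
)
print(resp.status_code)  # the OPTIONS branch returns 204
print(resp.headers.get("Access-Control-Allow-Origin"))
print(resp.headers.get("Access-Control-Allow-Methods"))
```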

orm/author.py

@@ -90,6 +90,7 @@ class Author(Base):
     Author model in the system.

     Attributes:
+        user (str): User identifier in the authorization service
         name (str): Display name
         slug (str): Unique string identifier
         bio (str): Short biography/status
@@ -104,6 +105,8 @@ class Author(Base):
     __tablename__ = "author"

+    user = Column(String)  # unbounded link with authorizer's User type
+
     name = Column(String, nullable=True, comment="Display name")
     slug = Column(String, unique=True, comment="Author's slug")
     bio = Column(String, nullable=True, comment="Bio")  # status description
@@ -121,14 +124,12 @@ class Author(Base):
     # Define the indexes
     __table_args__ = (
-        # Index for fast lookup by name
-        Index("idx_author_name", "name"),
         # Index for fast lookup by slug
         Index("idx_author_slug", "slug"),
+        # Index for fast lookup by the user identifier
+        Index("idx_author_user", "user"),
         # Index for filtering non-deleted authors
-        Index(
-            "idx_author_deleted_at", "deleted_at", postgresql_where=deleted_at.is_(None)
-        ),
+        Index("idx_author_deleted_at", "deleted_at", postgresql_where=deleted_at.is_(None)),
         # Index for sorting by creation time (for new authors)
         Index("idx_author_created_at", "created_at"),
         # Index for sorting by last-seen time

orm/draft.py

@@ -26,14 +26,11 @@ class DraftAuthor(Base):
     caption = Column(String, nullable=True, default="")

 class Draft(Base):
     __tablename__ = "draft"

     # required
     created_at: int = Column(Integer, nullable=False, default=lambda: int(time.time()))
-    # Columns for the author relations
-    created_by: int = Column("created_by", ForeignKey("author.id"), nullable=False)
-    community: int = Column("community", ForeignKey("community.id"), nullable=False, default=1)
+    created_by: int = Column(ForeignKey("author.id"), nullable=False)

     # optional
     layout: str = Column(String, nullable=True, default="article")
@@ -41,6 +38,7 @@ class Draft(Base):
     title: str = Column(String, nullable=True)
     subtitle: str | None = Column(String, nullable=True)
     lead: str | None = Column(String, nullable=True)
+    description: str | None = Column(String, nullable=True)
     body: str = Column(String, nullable=False, comment="Body")
     media: dict | None = Column(JSON, nullable=True)
     cover: str | None = Column(String, nullable=True, comment="Cover image url")
@@ -51,55 +49,7 @@ class Draft(Base):
     # auto
     updated_at: int | None = Column(Integer, nullable=True, index=True)
     deleted_at: int | None = Column(Integer, nullable=True, index=True)
-    updated_by: int | None = Column("updated_by", ForeignKey("author.id"), nullable=True)
-    deleted_by: int | None = Column("deleted_by", ForeignKey("author.id"), nullable=True)
-
-    # --- Relationships ---
-    # Only many-to-many relations via the helper tables
-    authors = relationship(Author, secondary="draft_author", lazy="select")
-    topics = relationship(Topic, secondary="draft_topic", lazy="select")
-
-    # Relation to Community (if needed as an object rather than an ID)
-    # community = relationship("Community", foreign_keys=[community_id], lazy="joined")
-    # For now community stays as an ID
-
-    # Relation to the publication (one-to-one or one-to-zero)
-    # Loaded via joinedload in the resolver
-    publication = relationship(
-        "Shout",
-        primaryjoin="Draft.id == Shout.draft",
-        foreign_keys="Shout.draft",
-        uselist=False,
-        lazy="noload",  # not loaded by default, only via options
-        viewonly=True,  # read-only relation
-    )
-
-    def dict(self):
-        """
-        Serializes the Draft object into a dictionary.
-        Guarantees that the topics and authors fields are always lists.
-        """
-        return {
-            "id": self.id,
-            "created_at": self.created_at,
-            "created_by": self.created_by,
-            "community": self.community,
-            "layout": self.layout,
-            "slug": self.slug,
-            "title": self.title,
-            "subtitle": self.subtitle,
-            "lead": self.lead,
-            "body": self.body,
-            "media": self.media or [],
-            "cover": self.cover,
-            "cover_caption": self.cover_caption,
-            "lang": self.lang,
-            "seo": self.seo,
-            "updated_at": self.updated_at,
-            "deleted_at": self.deleted_at,
-            "updated_by": self.updated_by,
-            "deleted_by": self.deleted_by,
-            # Guarantee that topics and authors are always lists
-            "topics": [topic.dict() for topic in (self.topics or [])],
-            "authors": [author.dict() for author in (self.authors or [])],
-        }
+    updated_by: int | None = Column(ForeignKey("author.id"), nullable=True)
+    deleted_by: int | None = Column(ForeignKey("author.id"), nullable=True)
+
+    authors = relationship(Author, secondary="draft_author")
+    topics = relationship(Topic, secondary="draft_topic")

orm/shout.py

@@ -91,6 +91,7 @@ class Shout(Base):
     cover: str | None = Column(String, nullable=True, comment="Cover image url")
     cover_caption: str | None = Column(String, nullable=True, comment="Cover image alt caption")
     lead: str | None = Column(String, nullable=True)
+    description: str | None = Column(String, nullable=True)
     title: str = Column(String, nullable=False)
     subtitle: str | None = Column(String, nullable=True)
     layout: str = Column(String, nullable=False, default="article")


@@ -1,2 +0,0 @@
[tool.ruff]
line-length = 108

requirements.txt

@@ -14,5 +14,4 @@ gql
 ariadne
 granian
 orjson
 pydantic
-trafilatura

resolvers/__init__.py

@@ -16,11 +16,9 @@ from resolvers.draft import (
     delete_draft,
     load_drafts,
     publish_draft,
+    unpublish_draft,
     update_draft,
 )
-from resolvers.editor import (
-    unpublish_shout,
-)
 from resolvers.feed import (
     load_shouts_coauthored,
     load_shouts_discussed,
@@ -39,7 +37,6 @@ from resolvers.reaction import (
     create_reaction,
     delete_reaction,
     load_comment_ratings,
-    load_comments_branch,
     load_reactions_by,
     load_shout_comments,
     load_shout_ratings,
@@ -110,7 +107,6 @@ __all__ = [
     "load_shout_comments",
     "load_shout_ratings",
     "load_comment_ratings",
-    "load_comments_branch",
     # notifier
     "load_notifications",
     "notifications_seen_thread",

resolvers/author.py

@@ -232,6 +232,22 @@ async def get_authors_all(_, _info):
     return await get_all_authors()

+@query.field("get_authors_paginated")
+async def get_authors_paginated(_, _info, limit=50, offset=0, by=None):
+    """
+    Returns a list of authors with pagination and statistics.
+
+    Args:
+        limit: Maximum number of authors to return
+        offset: Offset for pagination
+        by: Sort parameter (new/active)
+
+    Returns:
+        list: Authors with their statistics
+    """
+    return await get_authors_with_stats(limit, offset, by)
+
 @query.field("get_author")
 async def get_author(_, _info, slug="", author_id=0):
     author_dict = None
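
On the client, the new field is queried like any other; a hypothetical query document (the argument names follow the resolver signature above, and the selected fields are assumptions about the Author type, not taken from the diff):

```python
# Hypothetical GraphQL document for the resolver above; the selected fields
# (id, name, slug) and the string form of `by` are illustrative assumptions.
GET_AUTHORS_PAGINATED = """
query {
  get_authors_paginated(limit: 50, offset: 0, by: "new") {
    id
    name
    slug
  }
}
"""
```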

resolvers/draft.py

@@ -1,6 +1,7 @@
 import time
-import trafilatura
-from sqlalchemy.orm import joinedload
+from operator import or_
+from sqlalchemy.sql import and_

@@ -10,7 +11,7 @@ from cache.cache import (
     invalidate_shouts_cache,
 )
 from orm.author import Author
-from orm.draft import Draft, DraftAuthor, DraftTopic
+from orm.draft import Draft
 from orm.shout import Shout, ShoutAuthor, ShoutTopic
 from orm.topic import Topic
@@ -18,70 +19,35 @@ from services.db import local_session
 from services.notify import notify_shout
 from services.schema import mutation, query
 from services.search import search_service
-from utils.html_wrapper import wrap_html_fragment
 from utils.logger import root_logger as logger

 def create_shout_from_draft(session, draft, author_id):
-    """
-    Creates a new publication object (Shout) from a draft.
-
-    Args:
-        session: SQLAlchemy session (unused, kept for compatibility)
-        draft (Draft): Draft object
-        author_id (int): ID of the publication's author
-
-    Returns:
-        Shout: The new publication object (not yet persisted)
-
-    Example:
-        >>> from orm.draft import Draft
-        >>> draft = Draft(id=1, title='Заголовок', body='Текст', slug='slug', created_by=1)
-        >>> shout = create_shout_from_draft(None, draft, 1)
-        >>> shout.title
-        'Заголовок'
-        >>> shout.body
-        'Текст'
-        >>> shout.created_by
-        1
-    """
     # Create a new publication
     shout = Shout(
-        body=draft.body or "",
+        body=draft.body,
         slug=draft.slug,
         cover=draft.cover,
         cover_caption=draft.cover_caption,
         lead=draft.lead,
-        title=draft.title or "",
+        description=draft.description,
+        title=draft.title,
         subtitle=draft.subtitle,
-        layout=draft.layout or "article",
-        media=draft.media or [],
-        lang=draft.lang or "ru",
+        layout=draft.layout,
+        media=draft.media,
+        lang=draft.lang,
         seo=draft.seo,
         created_by=author_id,
         community=draft.community,
         draft=draft.id,
        deleted_at=None,
     )
-    # Initialize empty arrays for the relations
-    shout.topics = []
-    shout.authors = []
     return shout

 @query.field("load_drafts")
 @login_required
 async def load_drafts(_, info):
-    """
-    Loads all drafts available to the current user.
-
-    Preloads the related objects (topics, authors, publication)
-    to avoid detached-object errors during serialization.
-
-    Returns:
-        dict: A list of drafts or an error message
-    """
     user_id = info.context.get("user_id")
     author_dict = info.context.get("author", {})
     author_id = author_dict.get("id")
@@ -89,44 +55,13 @@ async def load_drafts(_, info):
     if not user_id or not author_id:
         return {"error": "User ID and author ID are required"}

-    try:
-        with local_session() as session:
-            # Preload authors, topics, and the related publication
-            drafts_query = (
-                session.query(Draft)
-                .options(
-                    joinedload(Draft.topics),
-                    joinedload(Draft.authors),
-                    joinedload(Draft.publication)  # load the related publication
-                )
-                .filter(Draft.authors.any(Author.id == author_id))
-            )
-            drafts = drafts_query.all()
-
-            # Convert the objects to dictionaries while they are still in the session context
-            drafts_data = []
-            for draft in drafts:
-                draft_dict = draft.dict()
-                # Always return an array for topics, even an empty one
-                draft_dict["topics"] = [topic.dict() for topic in (draft.topics or [])]
-                draft_dict["authors"] = [author.dict() for author in (draft.authors or [])]
-                # Add publication info when present
-                if draft.publication:
-                    draft_dict["publication"] = {
-                        "id": draft.publication.id,
-                        "slug": draft.publication.slug,
-                        "published_at": draft.publication.published_at
-                    }
-                else:
-                    draft_dict["publication"] = None
-                drafts_data.append(draft_dict)
-            return {"drafts": drafts_data}
-    except Exception as e:
-        logger.error(f"Failed to load drafts: {e}", exc_info=True)
-        return {"error": f"Failed to load drafts: {str(e)}"}
+    with local_session() as session:
+        drafts = (
+            session.query(Draft)
+            .filter(or_(Draft.authors.any(Author.id == author_id), Draft.created_by == author_id))
+            .all()
+        )
+        return {"drafts": drafts}
 @mutation.field("create_draft")
@@ -170,40 +105,23 @@ async def create_draft(_, info, draft_input):
     if "title" not in draft_input or not draft_input["title"]:
         draft_input["title"] = ""  # empty string instead of NULL

-    # Check the slug: it must either be non-empty or not be passed at all
-    if "slug" in draft_input and (draft_input["slug"] is None or draft_input["slug"] == ""):
-        # When creating a draft, drop an empty slug from the input
-        del draft_input["slug"]
     try:
         with local_session() as session:
             # Remove id from input if present since it's auto-generated
             if "id" in draft_input:
                 del draft_input["id"]

-            # Add the current creation time and the author ID
+            # Add the current creation time
             draft_input["created_at"] = int(time.time())
-            draft_input["created_by"] = author_id

-            draft = Draft(**draft_input)
+            draft = Draft(created_by=author_id, **draft_input)
             session.add(draft)
-            session.flush()
-
-            # Add the creator as an author
-            da = DraftAuthor(shout=draft.id, author=author_id)
-            session.add(da)
             session.commit()
             return {"draft": draft}
     except Exception as e:
         logger.error(f"Failed to create draft: {e}", exc_info=True)
         return {"error": f"Failed to create draft: {str(e)}"}

-def generate_teaser(body, limit=300):
-    body_html = wrap_html_fragment(body)
-    body_text = trafilatura.extract(body_html, include_comments=False, include_tables=False)
-    body_teaser = ". ".join(body_text[:limit].split(". ")[:-1])
-    return body_teaser
 @mutation.field("update_draft")
 @login_required
@@ -212,21 +130,7 @@ async def update_draft(_, info, draft_id: int, draft_input):
     Args:
         draft_id: ID of the draft to update
-        draft_input: Data to update the draft with, per the DraftInput schema:
-            - layout: String
-            - author_ids: [Int!]
-            - topic_ids: [Int!]
-            - main_topic_id: Int
-            - media: [MediaItemInput]
-            - lead: String
-            - subtitle: String
-            - lang: String
-            - seo: String
-            - body: String
-            - title: String
-            - slug: String
-            - cover: String
-            - cover_caption: String
+        draft_input: Data to update the draft with

     Returns:
         dict: The updated draft or an error message
@@ -238,89 +142,15 @@ async def update_draft(_, info, draft_id: int, draft_input):
     if not user_id or not author_id:
         return {"error": "Author ID are required"}

-    try:
-        with local_session() as session:
-            draft = session.query(Draft).filter(Draft.id == draft_id).first()
-            if not draft:
-                return {"error": "Draft not found"}
-
-            # Filter the input, keeping only the allowed Draft fields
-            allowed_fields = {
-                "layout", "author_ids", "topic_ids", "main_topic_id",
-                "media", "lead", "subtitle", "lang", "seo", "body",
-                "title", "slug", "cover", "cover_caption"
-            }
-            filtered_input = {k: v for k, v in draft_input.items() if k in allowed_fields}
-
-            # Check the slug
-            if "slug" in filtered_input and not filtered_input["slug"]:
-                del filtered_input["slug"]
-
-            # Update the author relations if provided
-            if "author_ids" in filtered_input:
-                author_ids = filtered_input.pop("author_ids")
-                if author_ids:
-                    # Clear the current relations
-                    session.query(DraftAuthor).filter(DraftAuthor.shout == draft_id).delete()
-                    # Add the new relations
-                    for aid in author_ids:
-                        da = DraftAuthor(shout=draft_id, author=aid)
-                        session.add(da)
-
-            # Update the topic relations if provided
-            if "topic_ids" in filtered_input:
-                topic_ids = filtered_input.pop("topic_ids")
-                main_topic_id = filtered_input.pop("main_topic_id", None)
-                if topic_ids:
-                    # Clear the current relations
-                    session.query(DraftTopic).filter(DraftTopic.shout == draft_id).delete()
-                    # Add the new relations
-                    for tid in topic_ids:
-                        dt = DraftTopic(
-                            shout=draft_id,
-                            topic=tid,
-                            main=(tid == main_topic_id) if main_topic_id else False
-                        )
-                        session.add(dt)
-
-            # Generate SEO if it was not provided
-            if "seo" not in filtered_input and not draft.seo:
-                body_src = filtered_input.get("body", draft.body)
-                lead_src = filtered_input.get("lead", draft.lead)
-                body_html = wrap_html_fragment(body_src)
-                lead_html = wrap_html_fragment(lead_src)
-                try:
-                    body_text = trafilatura.extract(body_html, include_comments=False, include_tables=False) if body_src else None
-                    lead_text = trafilatura.extract(lead_html, include_comments=False, include_tables=False) if lead_src else None
-                    body_teaser = generate_teaser(body_text, 300) if body_text else ""
-                    filtered_input["seo"] = lead_text if lead_text else body_teaser
-                except Exception as e:
-                    logger.warning(f"Failed to generate SEO for draft {draft_id}: {e}")
-
-            # Update the draft's main fields
-            for key, value in filtered_input.items():
-                setattr(draft, key, value)
-
-            # Update the metadata
-            draft.updated_at = int(time.time())
-            draft.updated_by = author_id
-            session.commit()
-
-            # Convert the object into a dictionary for the response
-            draft_dict = draft.dict()
-            draft_dict["topics"] = [topic.dict() for topic in draft.topics]
-            draft_dict["authors"] = [author.dict() for author in draft.authors]
-            # Put the author object into updated_by
-            draft_dict["updated_by"] = author_dict
-
-            return {"draft": draft_dict}
-    except Exception as e:
-        logger.error(f"Failed to update draft: {e}", exc_info=True)
-        return {"error": f"Failed to update draft: {str(e)}"}
+    with local_session() as session:
+        draft = session.query(Draft).filter(Draft.id == draft_id).first()
+        if not draft:
+            return {"error": "Draft not found"}
+
+        Draft.update(draft, draft_input)
+        draft.updated_at = int(time.time())
+        session.commit()
+        return {"draft": draft}
 @mutation.field("delete_draft")
@@ -340,136 +170,183 @@ async def delete_draft(_, info, draft_id: int):
     return {"draft": draft}

-def validate_html_content(html_content: str) -> tuple[bool, str]:
-    """
-    Validates HTML content via trafilatura.
-
-    Args:
-        html_content: The HTML string to check
-
-    Returns:
-        tuple[bool, str]: (validity, error message)
-
-    Example:
-        >>> is_valid, error = validate_html_content("<p>Valid HTML</p>")
-        >>> is_valid
-        True
-        >>> error
-        ''
-        >>> is_valid, error = validate_html_content("Invalid < HTML")
-        >>> is_valid
-        False
-        >>> 'Invalid HTML' in error
-        True
-    """
-    if not html_content or not html_content.strip():
-        return False, "Content is empty"
-    try:
-        html_content = wrap_html_fragment(html_content)
-        extracted = trafilatura.extract(html_content)
-        if not extracted:
-            return False, "Invalid HTML structure or empty content"
-        return True, ""
-    except Exception as e:
-        logger.error(f"HTML validation error: {e}", exc_info=True)
-        return False, f"Invalid HTML content: {str(e)}"

 @mutation.field("publish_draft")
 @login_required
 async def publish_draft(_, info, draft_id: int):
-    """
-    Publishes a draft, creating a new Shout or updating the existing one.
-
-    Args:
-        draft_id (int): ID of the draft to publish
-        draft: Draft object (optional)
-
-    Returns:
-        dict: The publication result with the shout, or an error message
-    """
     user_id = info.context.get("user_id")
     author_dict = info.context.get("author", {})
     author_id = author_dict.get("id")
     if not user_id or not author_id:
-        return {"error": "Author ID is required"}
-    try:
-        with local_session() as session:
-            # Load the draft with all its relations
-            draft = (
-                session.query(Draft)
-                .options(
-                    joinedload(Draft.topics),
-                    joinedload(Draft.authors),
-                    joinedload(Draft.publication)
-                )
-                .filter(Draft.id == draft_id)
-                .first()
-            )
-            if not draft:
-                return {"error": "Draft not found"}
-
-            # Validate the HTML in the body
-            is_valid, error = validate_html_content(draft.body)
-            if not is_valid:
-                return {"error": f"Cannot publish draft: {error}"}
-
-            # Check whether a publication already exists for this draft
-            if draft.publication:
-                shout = draft.publication
-                # Update the existing publication
-                for field in ["body", "title", "subtitle", "lead", "cover", "cover_caption", "media", "lang", "seo"]:
-                    if hasattr(draft, field):
-                        setattr(shout, field, getattr(draft, field))
-                shout.updated_at = int(time.time())
-                shout.updated_by = author_id
-            else:
-                # Create a new publication
-                shout = create_shout_from_draft(session, draft, author_id)
-                now = int(time.time())
-                shout.created_at = now
-                shout.published_at = now
-                session.add(shout)
-                session.flush()  # get the new shout's ID
-
-            # Clear the existing relations
-            session.query(ShoutAuthor).filter(ShoutAuthor.shout == shout.id).delete()
-            session.query(ShoutTopic).filter(ShoutTopic.shout == shout.id).delete()
-
-            # Add the authors
-            for author in (draft.authors or []):
-                sa = ShoutAuthor(shout=shout.id, author=author.id)
-                session.add(sa)
-
-            # Add the topics
-            for topic in (draft.topics or []):
-                st = ShoutTopic(
-                    topic=topic.id,
-                    shout=shout.id,
-                    main=topic.main if hasattr(topic, "main") else False
-                )
-                session.add(st)
-
-            # Invalidate the cache only for a new publication or one that was previously unpublished
-            if not was_published:
-                cache_keys = ["feed", f"author_{author_id}", "random_top", "unrated"]
-                # Add keys for the topics
-                for topic in shout.topics:
-                    cache_keys.append(f"topic_{topic.id}")
-                    cache_keys.append(f"topic_shouts_{topic.id}")
-                    await cache_by_id(Topic, topic.id, cache_topic)
-                # Invalidate the cache
-                await invalidate_shouts_cache(cache_keys)
-                await invalidate_shout_related_cache(shout, author_id)
-                # Refresh the author cache
-                for author in shout.authors:
-                    await cache_by_id(Author, author.id, cache_author)
-                # Send the publication notification
-                await notify_shout(shout.dict(), "published")
-                # Update the search index
-                search_service.index(shout)
-            else:
-                # For already-published material just send an update notification
-                await notify_shout(shout.dict(), "update")
-            session.commit()
-
-            logger.info(f"Successfully published shout #{shout.id} from draft #{draft_id}")
-            logger.debug(f"Shout data: {shout.dict()}")
-            return {"shout": shout}
-    except Exception as e:
-        logger.error(f"Failed to publish draft {draft_id}: {e}", exc_info=True)
-        return {"error": f"Failed to publish draft: {str(e)}"}
+        return {"error": "User ID and author ID are required"}
+    with local_session() as session:
+        draft = session.query(Draft).filter(Draft.id == draft_id).first()
+        if not draft:
+            return {"error": "Draft not found"}
+        shout = create_shout_from_draft(session, draft, author_id)
+        session.add(shout)
+        session.commit()
+        return {"shout": shout, "draft": draft}
+
+@mutation.field("unpublish_draft")
+@login_required
+async def unpublish_draft(_, info, draft_id: int):
+    user_id = info.context.get("user_id")
+    author_dict = info.context.get("author", {})
+    author_id = author_dict.get("id")
+    if not user_id or not author_id:
+        return {"error": "User ID and author ID are required"}
+    with local_session() as session:
+        draft = session.query(Draft).filter(Draft.id == draft_id).first()
+        if not draft:
+            return {"error": "Draft not found"}
+        shout = session.query(Shout).filter(Shout.draft == draft.id).first()
+        if shout:
+            shout.published_at = None
+            session.commit()
+            return {"shout": shout, "draft": draft}
+        return {"error": "Failed to unpublish draft"}
+
+@mutation.field("publish_shout")
+@login_required
+async def publish_shout(_, info, shout_id: int):
+    """Publish draft as a shout or update existing shout.
+
+    Args:
+        shout_id: ID of an existing publication, or 0 for a new one
+    """
+    user_id = info.context.get("user_id")
+    author_dict = info.context.get("author", {})
+    author_id = author_dict.get("id")
+    now = int(time.time())
+    if not user_id or not author_id:
+        return {"error": "User ID and author ID are required"}
+    try:
+        with local_session() as session:
+            shout = session.query(Shout).filter(Shout.id == shout_id).first()
+            if not shout:
+                return {"error": "Shout not found"}
+            was_published = shout.published_at is not None
+            draft = session.query(Draft).where(Draft.id == shout.draft).first()
+            if not draft:
+                return {"error": "Draft not found"}
+            # Find the draft if none was passed
+            if not shout:
+                shout = create_shout_from_draft(session, draft, author_id)
+            else:
+                # Update the existing publication
+                shout.draft = draft.id
+                shout.created_by = author_id
+                shout.title = draft.title
+                shout.subtitle = draft.subtitle
+                shout.body = draft.body
+                shout.cover = draft.cover
+                shout.cover_caption = draft.cover_caption
+                shout.lead = draft.lead
+                shout.description = draft.description
+                shout.layout = draft.layout
+                shout.media = draft.media
+                shout.lang = draft.lang
+                shout.seo = draft.seo
+                draft.updated_at = now
+                shout.updated_at = now
+                # Set published_at only if the shout was previously unpublished
+                if not was_published:
+                    shout.published_at = now
+            # Handle the author relations
+            if (
+                not session.query(ShoutAuthor)
+                .filter(and_(ShoutAuthor.shout == shout.id, ShoutAuthor.author == author_id))
+                .first()
+            ):
+                sa = ShoutAuthor(shout=shout.id, author=author_id)
+                session.add(sa)
+            # Handle the topics
+            if draft.topics:
+                for topic in draft.topics:
+                    st = ShoutTopic(
+                        topic=topic.id, shout=shout.id, main=topic.main if hasattr(topic, "main") else False
+                    )
+                    session.add(st)
+            session.add(shout)
+            session.add(draft)
+            session.flush()
+            session.commit()
+            # Invalidate the cache
+            invalidate_shouts_cache()
+            invalidate_shout_related_cache(shout.id)
+            # Notify about the publication
+            await notify_shout(shout.id)
+            # Update the search index
+            search_service.index_shout(shout)
+            return {"shout": shout}
+    except Exception as e:
+        logger.error(f"Failed to publish shout: {e}", exc_info=True)
+        if "session" in locals():
+            session.rollback()
+        return {"error": f"Failed to publish shout: {str(e)}"}
+
+@mutation.field("unpublish_shout")
+@login_required
+async def unpublish_shout(_, info, shout_id: int):
+    """Unpublish a shout.
+
+    Args:
+        shout_id: The ID of the shout to unpublish
+
+    Returns:
+        dict: The unpublished shout or an error message
+    """
+    author_dict = info.context.get("author", {})
+    author_id = author_dict.get("id")
+    if not author_id:
+        return {"error": "Author ID is required"}
+    shout = None
+    with local_session() as session:
+        try:
+            shout = session.query(Shout).filter(Shout.id == shout_id).first()
+            shout.published_at = None
+            session.commit()
+            invalidate_shout_related_cache(shout)
+            invalidate_shouts_cache()
+        except Exception:
+            session.rollback()
+            return {"error": "Failed to unpublish shout"}
+    return {"shout": shout}

resolvers/editor.py

@@ -1,9 +1,8 @@
 import time

 import orjson
-import trafilatura
 from sqlalchemy import and_, desc, select
-from sqlalchemy.orm import joinedload, selectinload
+from sqlalchemy.orm import joinedload
 from sqlalchemy.sql.functions import coalesce

 from cache.cache import (
@@ -13,7 +12,6 @@ from cache.cache import (
     invalidate_shouts_cache,
 )
 from orm.author import Author
-from orm.draft import Draft
 from orm.shout import Shout, ShoutAuthor, ShoutTopic
 from orm.topic import Topic
 from resolvers.follower import follow, unfollow
@@ -21,9 +19,8 @@ from resolvers.stat import get_with_stat
 from services.auth import login_required
 from services.db import local_session
 from services.notify import notify_shout
-from services.schema import mutation, query
+from services.schema import query
 from services.search import search_service
-from utils.html_wrapper import wrap_html_fragment
 from utils.logger import root_logger as logger
@@ -179,18 +176,9 @@ async def create_shout(_, info, inp):
     logger.info(f"Creating shout with input: {inp}")

     # Create the publication without topics
-    body = inp.get("body", "")
-    lead = inp.get("lead", "")
-    body_html = wrap_html_fragment(body)
-    lead_html = wrap_html_fragment(lead)
-    body_text = trafilatura.extract(body_html)
-    lead_text = trafilatura.extract(lead_html)
-    seo = inp.get("seo", lead_text.strip() or body_text.strip()[:300].split(". ")[:-1].join(". "))
     new_shout = Shout(
         slug=slug,
-        body=body,
-        seo=seo,
-        lead=lead,
+        body=inp.get("body", ""),
         layout=inp.get("layout", "article"),
         title=inp.get("title", ""),
         created_by=author_id,
@@ -392,7 +380,7 @@ def patch_topics(session, shout, topics_input):
 # @login_required
 async def update_shout(_, info, shout_id: int, shout_input=None, publish=False):
     logger.info(f"Starting update_shout with id={shout_id}, publish={publish}")
-    logger.debug(f"Full shout_input: {shout_input}")  # DraftInput
+    logger.debug(f"Full shout_input: {shout_input}")

     user_id = info.context.get("user_id")
     roles = info.context.get("roles", [])
@ -649,178 +637,39 @@ def get_main_topic(topics):
"""Get the main topic from a list of ShoutTopic objects.""" """Get the main topic from a list of ShoutTopic objects."""
logger.info(f"Starting get_main_topic with {len(topics) if topics else 0} topics") logger.info(f"Starting get_main_topic with {len(topics) if topics else 0} topics")
logger.debug( logger.debug(
f"Topics data: {[(t.slug, getattr(t, 'main', False)) for t in topics] if topics else []}" f"Topics data: {[(t.topic.slug if t.topic else 'no-topic', t.main) for t in topics] if topics else []}"
) )
if not topics: if not topics:
logger.warning("No topics provided to get_main_topic") logger.warning("No topics provided to get_main_topic")
return {"id": 0, "title": "no topic", "slug": "notopic", "is_main": True} return {"id": 0, "title": "no topic", "slug": "notopic", "is_main": True}
# Check whether topics is a list of ShoutTopic or Topic objects # Find first main topic in original order
if hasattr(topics[0], 'topic') and topics[0].topic: main_topic_rel = next((st for st in topics if st.main), None)
# For ShoutTopic objects (legacy format) logger.debug(
# Find first main topic in original order f"Found main topic relation: {main_topic_rel.topic.slug if main_topic_rel and main_topic_rel.topic else None}"
main_topic_rel = next((st for st in topics if getattr(st, 'main', False)), None) )
logger.debug(
f"Found main topic relation: {main_topic_rel.topic.slug if main_topic_rel and main_topic_rel.topic else None}"
)
if main_topic_rel and main_topic_rel.topic: if main_topic_rel and main_topic_rel.topic:
result = { result = {
"slug": main_topic_rel.topic.slug, "slug": main_topic_rel.topic.slug,
"title": main_topic_rel.topic.title, "title": main_topic_rel.topic.title,
"id": main_topic_rel.topic.id, "id": main_topic_rel.topic.id,
"is_main": True, "is_main": True,
} }
logger.info(f"Returning main topic: {result}") logger.info(f"Returning main topic: {result}")
return result return result
# If no main found but topics exist, return first # If no main found but topics exist, return first
if topics and topics[0].topic: if topics and topics[0].topic:
logger.info(f"No main topic found, using first topic: {topics[0].topic.slug}") logger.info(f"No main topic found, using first topic: {topics[0].topic.slug}")
result = { result = {
"slug": topics[0].topic.slug, "slug": topics[0].topic.slug,
"title": topics[0].topic.title, "title": topics[0].topic.title,
"id": topics[0].topic.id, "id": topics[0].topic.id,
"is_main": True, "is_main": True,
} }
return result return result
else:
# For Topic objects (new format from selectinload)
# After switching to selectinload we get a plain list of Topic objects
if topics:
logger.info(f"Using first topic as main: {topics[0].slug}")
result = {
"slug": topics[0].slug,
"title": topics[0].title,
"id": topics[0].id,
"is_main": True,
}
return result
logger.warning("No valid topics found, returning default") logger.warning("No valid topics found, returning default")
return {"slug": "notopic", "title": "no topic", "id": 0, "is_main": True} return {"slug": "notopic", "title": "no topic", "id": 0, "is_main": True}
@mutation.field("unpublish_shout")
@login_required
async def unpublish_shout(_, info, shout_id: int):
"""Снимает публикацию (shout) с публикации.
Предзагружает связанный черновик (draft) и его авторов/темы, чтобы избежать
ошибок при последующем доступе к ним в GraphQL.
Args:
shout_id: ID публикации для снятия с публикации
Returns:
dict: Снятая с публикации публикация или сообщение об ошибке
"""
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
if not author_id:
# В идеале нужна проверка прав, имеет ли автор право снимать публикацию
return {"error": "Author ID is required"}
shout = None
with local_session() as session:
try:
# Load the Shout with all relations to build the response correctly
shout = (
session.query(Shout)
.options(
joinedload(Shout.authors),
selectinload(Shout.topics)
)
.filter(Shout.id == shout_id)
.first()
)
if not shout:
logger.warning(f"Shout not found for unpublish: ID {shout_id}")
return {"error": "Shout not found"}
# If the shout has a linked draft, load it with its relationships
if shout.draft:
# Load the draft separately with its relations
draft = (
session.query(Draft)
.options(
selectinload(Draft.authors),
selectinload(Draft.topics)
)
.filter(Draft.id == shout.draft)
.first()
)
# Attach the draft to the shout manually for API access
if draft:
shout.draft_obj = draft
# TODO: add an access-rights check if needed
# if author_id not in [a.id for a in shout.authors]: # requires selectinload(Shout.authors) above
# logger.warning(f"Author {author_id} denied unpublishing shout {shout_id}")
# return {"error": "Access denied"}
# Remember the old slug and id to build the publication field
shout_slug = shout.slug
shout_id_for_publication = shout.id
# Unpublish (set published_at to None)
shout.published_at = None
session.commit()
# Build a complete dictionary for the response
shout_dict = shout.dict()
# Add related data
shout_dict["topics"] = (
[
{"id": topic.id, "slug": topic.slug, "title": topic.title}
for topic in shout.topics
]
if shout.topics
else []
)
# Add main_topic
shout_dict["main_topic"] = get_main_topic(shout.topics)
# Add authors
shout_dict["authors"] = (
[
{"id": author.id, "name": author.name, "slug": author.slug}
for author in shout.authors
]
if shout.authors
else []
)
# Important: update the publication field to reflect the "unpublished" state
shout_dict["publication"] = {
"id": shout_id_for_publication,
"slug": shout_slug,
"published_at": None # the key change: published_at is set to None
}
# Cache invalidation
try:
cache_keys = [
"feed", # the feed
f"author_{author_id}", # the author's publications
"random_top", # random top shouts
"unrated", # unrated shouts
]
await invalidate_shout_related_cache(shout, author_id)
await invalidate_shouts_cache(cache_keys)
logger.info(f"Cache invalidated after unpublishing shout {shout_id}")
except Exception as cache_err:
logger.error(f"Failed to invalidate cache for unpublish shout {shout_id}: {cache_err}")
except Exception as e:
session.rollback()
logger.error(f"Failed to unpublish shout {shout_id}: {e}", exc_info=True)
return {"error": f"Failed to unpublish shout: {str(e)}"}
# Return the built dictionary instead of the ORM object
logger.info(f"Shout {shout_id} unpublished successfully by author {author_id}")
return {"shout": shout_dict}

View File

@ -0,0 +1,25 @@
{
"include": [
"."
],
"exclude": [
"**/node_modules",
"**/__pycache__",
"**/.*"
],
"defineConstant": {
"DEBUG": true
},
"venvPath": ".",
"venv": ".venv",
"pythonVersion": "3.11",
"typeCheckingMode": "strict",
"reportMissingImports": true,
"reportMissingTypeStubs": false,
"reportUnknownMemberType": false,
"reportUnknownParameterType": false,
"reportUnknownVariableType": false,
"reportUnknownArgumentType": false,
"reportPrivateUsage": false,
"reportUntypedFunctionDecorator": false
}

View File

@ -67,35 +67,30 @@ def add_reaction_stat_columns(q):
return q return q
def get_reactions_with_stat(q, limit=10, offset=0): def get_reactions_with_stat(q, limit, offset):
""" """
Execute the reaction query and retrieve reactions with statistics. Execute the reaction query and retrieve reactions with statistics.
:param q: Query with reactions and statistics. :param q: Query with reactions and statistics.
:param limit: Number of reactions to load. :param limit: Number of reactions to load.
:param offset: Pagination offset. :param offset: Pagination offset.
:return: List of reactions as dictionaries. :return: List of reactions.
>>> get_reactions_with_stat(q, 10, 0) # doctest: +SKIP
[{'id': 1, 'body': 'Comment text', 'stat': {'rating': 5, 'comments_count': 3}, ...}]
""" """
q = q.limit(limit).offset(offset) q = q.limit(limit).offset(offset)
reactions = [] reactions = []
with local_session() as session: with local_session() as session:
result_rows = session.execute(q) result_rows = session.execute(q)
for reaction, author, shout, comments_count, rating_stat in result_rows: for reaction, author, shout, commented_stat, rating_stat in result_rows:
# Skip reactions with a missing shout or author # Skip reactions with a missing shout or author
if not shout or not author: if not shout or not author:
logger.error(f"Skipping reaction with missing shout or author: {reaction.dict()}") logger.error(f"Skipping reaction with missing shout or author: {reaction.dict()}")
continue continue
# Convert the Reaction to a dict for key-based access reaction.created_by = author.dict()
reaction_dict = reaction.dict() reaction.shout = shout.dict()
reaction_dict["created_by"] = author.dict() reaction.stat = {"rating": rating_stat, "comments": commented_stat}
reaction_dict["shout"] = shout.dict() reactions.append(reaction)
reaction_dict["stat"] = {"rating": rating_stat, "comments_count": comments_count}
reactions.append(reaction_dict)
return reactions return reactions
@ -398,7 +393,7 @@ async def update_reaction(_, info, reaction):
result = session.execute(reaction_query).unique().first() result = session.execute(reaction_query).unique().first()
if result: if result:
r, author, _shout, comments_count, rating_stat = result r, author, _shout, commented_stat, rating_stat = result
if not r or not author: if not r or not author:
return {"error": "Invalid reaction ID or unauthorized"} return {"error": "Invalid reaction ID or unauthorized"}
@ -413,7 +408,7 @@ async def update_reaction(_, info, reaction):
session.commit() session.commit()
r.stat = { r.stat = {
"comments_count": comments_count, "commented": commented_stat,
"rating": rating_stat, "rating": rating_stat,
} }
@ -487,16 +482,12 @@ def apply_reaction_filters(by, q):
shout_slug = by.get("shout") shout_slug = by.get("shout")
if shout_slug: if shout_slug:
q = q.filter(Shout.slug == shout_slug) q = q.filter(Shout.slug == shout_slug)
shout_id = by.get("shout_id")
if shout_id:
q = q.filter(Shout.id == shout_id)
shouts = by.get("shouts") shouts = by.get("shouts")
if shouts: if shouts:
q = q.filter(Shout.slug.in_(shouts)) q = q.filter(Shout.slug.in_(shouts))
created_by = by.get("created_by", by.get("author_id")) created_by = by.get("created_by")
if created_by: if created_by:
q = q.filter(Author.id == created_by) q = q.filter(Author.id == created_by)
@ -621,22 +612,24 @@ async def load_shout_comments(_, info, shout: int, limit=50, offset=0):
@query.field("load_comment_ratings") @query.field("load_comment_ratings")
async def load_comment_ratings(_, info, comment: int, limit=50, offset=0): async def load_comment_ratings(_, info, comment: int, limit=50, offset=0):
""" """
Load ratings for a specified comment with pagination. Load ratings for a specified comment with pagination and statistics.
:param info: GraphQL context info. :param info: GraphQL context info.
:param comment: Comment ID. :param comment: Comment ID.
:param limit: Number of ratings to load. :param limit: Number of ratings to load.
:param offset: Pagination offset. :param offset: Pagination offset.
:return: List of ratings. :return: List of reactions.
""" """
q = query_reactions() q = query_reactions()
q = add_reaction_stat_columns(q)
# Filter, group, sort, limit, offset # Filter, group, sort, limit, offset
q = q.filter( q = q.filter(
and_( and_(
Reaction.deleted_at.is_(None), Reaction.deleted_at.is_(None),
Reaction.reply_to == comment, Reaction.reply_to == comment,
Reaction.kind.in_(RATING_REACTIONS), Reaction.kind == ReactionKind.COMMENT.value,
) )
) )
q = q.group_by(Reaction.id, Author.id, Shout.id) q = q.group_by(Reaction.id, Author.id, Shout.id)
@ -644,187 +637,3 @@ async def load_comment_ratings(_, info, comment: int, limit=50, offset=0):
# Retrieve and return reactions # Retrieve and return reactions
return get_reactions_with_stat(q, limit, offset) return get_reactions_with_stat(q, limit, offset)
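A note on the dev-side filter above: RATING_REACTIONS is imported elsewhere and its definition is not part of this diff; presumably it names the rating kinds, roughly:

# Assumed definition of RATING_REACTIONS (not shown in this diff)
RATING_REACTIONS = [ReactionKind.LIKE.value, ReactionKind.DISLIKE.value]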
@query.field("load_comments_branch")
async def load_comments_branch(
_,
_info,
shout: int,
parent_id: int | None = None,
limit=10,
offset=0,
sort="newest",
children_limit=3,
children_offset=0,
):
"""
Загружает иерархические комментарии с возможностью пагинации корневых и дочерних.
:param info: GraphQL context info.
:param shout: ID статьи.
:param parent_id: ID родительского комментария (None для корневых).
:param limit: Количество комментариев для загрузки.
:param offset: Смещение для пагинации.
:param sort: Порядок сортировки ('newest', 'oldest', 'like').
:param children_limit: Максимальное количество дочерних комментариев.
:param children_offset: Смещение для дочерних комментариев.
:return: Список комментариев с дочерними.
"""
# Build the base query
q = query_reactions()
q = add_reaction_stat_columns(q)
# Filter by article and kind (comments)
q = q.filter(
and_(
Reaction.deleted_at.is_(None),
Reaction.shout == shout,
Reaction.kind == ReactionKind.COMMENT.value,
)
)
# Filter by parent ID
if parent_id is None:
# Load only root comments
q = q.filter(Reaction.reply_to.is_(None))
else:
# Load only direct replies to the given comment
q = q.filter(Reaction.reply_to == parent_id)
# Sorting and grouping
q = q.group_by(Reaction.id, Author.id, Shout.id)
# Determine the sort order
order_by_stmt = None
if sort.lower() == "oldest":
order_by_stmt = asc(Reaction.created_at)
elif sort.lower() == "like":
order_by_stmt = desc("rating_stat")
else: # "newest" by default
order_by_stmt = desc(Reaction.created_at)
q = q.order_by(order_by_stmt)
# Run the query to fetch comments
comments = get_reactions_with_stat(q, limit, offset)
# If comments were found, load children and reply counts
if comments:
# Load the reply count for each comment
await load_replies_count(comments)
# Load child comments
await load_first_replies(comments, children_limit, children_offset, sort)
return comments
async def load_replies_count(comments):
"""
Загружает количество ответов для списка комментариев и обновляет поле stat.comments_count.
:param comments: Список комментариев, для которых нужно загрузить количество ответов.
"""
if not comments:
return
comment_ids = [comment["id"] for comment in comments]
# Query to count replies
q = (
select(Reaction.reply_to.label("parent_id"), func.count().label("count"))
.where(
and_(
Reaction.reply_to.in_(comment_ids),
Reaction.deleted_at.is_(None),
Reaction.kind == ReactionKind.COMMENT.value,
)
)
.group_by(Reaction.reply_to)
)
# Run the query
with local_session() as session:
result = session.execute(q).fetchall()
# Build a {parent_id: count} dictionary
replies_count = {row[0]: row[1] for row in result}
# Add the values to the comments
for comment in comments:
if "stat" not in comment:
comment["stat"] = {}
# Update the comment counter in stat
comment["stat"]["comments_count"] = replies_count.get(comment["id"], 0)
async def load_first_replies(comments, limit, offset, sort="newest"):
"""
Загружает первые N ответов для каждого комментария.
:param comments: Список комментариев, для которых нужно загрузить ответы.
:param limit: Максимальное количество ответов для каждого комментария.
:param offset: Смещение для пагинации дочерних комментариев.
:param sort: Порядок сортировки ответов.
"""
if not comments or limit <= 0:
return
# Collect comment IDs
comment_ids = [comment["id"] for comment in comments]
# Base query for loading replies
q = query_reactions()
q = add_reaction_stat_columns(q)
# Filter: only replies to the given comments
q = q.filter(
and_(
Reaction.reply_to.in_(comment_ids),
Reaction.deleted_at.is_(None),
Reaction.kind == ReactionKind.COMMENT.value,
)
)
# Grouping
q = q.group_by(Reaction.id, Author.id, Shout.id)
# Determine the sort order
order_by_stmt = None
if sort.lower() == "oldest":
order_by_stmt = asc(Reaction.created_at)
elif sort.lower() == "like":
order_by_stmt = desc("rating_stat")
else: # "newest" по умолчанию
order_by_stmt = desc(Reaction.created_at)
q = q.order_by(order_by_stmt, Reaction.reply_to)
# Run the query - the limit is set high enough to fetch all replies,
# but no more than 100 per parent comment
replies = get_reactions_with_stat(q, limit=100, offset=0)
# Group replies by parent ID
replies_by_parent = {}
for reply in replies:
parent_id = reply.get("reply_to")
if parent_id not in replies_by_parent:
replies_by_parent[parent_id] = []
replies_by_parent[parent_id].append(reply)
# Attach replies to their comments, applying offset and limit
for comment in comments:
comment_id = comment["id"]
if comment_id in replies_by_parent:
parent_replies = replies_by_parent[comment_id]
# Apply offset and limit
comment["first_replies"] = parent_replies[offset : offset + limit]
else:
comment["first_replies"] = []
# Load reply counts for the child comments
all_replies = [reply for replies in replies_by_parent.values() for reply in replies]
if all_replies:
await load_replies_count(all_replies)
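A minimal usage sketch of the branch loader; invoking the resolver directly with None placeholders for the GraphQL root and info arguments is an assumption for illustration (in production the GraphQL engine calls it):

import asyncio

async def demo_branch():
    # First 10 root comments of shout 123, newest first, each with
    # up to 3 first replies and a populated stat.comments_count
    comments = await load_comments_branch(
        None, None, shout=123, limit=10, offset=0,
        sort="newest", children_limit=3, children_offset=0,
    )
    for c in comments:
        print(c["id"], c["stat"]["comments_count"], len(c["first_replies"]))

# asyncio.run(demo_branch())  # assumes a configured database session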

View File

@ -225,7 +225,7 @@ def get_shouts_with_links(info, q, limit=20, offset=0):
elif isinstance(row.stat, dict): elif isinstance(row.stat, dict):
stat = row.stat stat = row.stat
viewed = ViewedStorage.get_shout(shout_id=shout_id) or 0 viewed = ViewedStorage.get_shout(shout_id=shout_id) or 0
shout_dict["stat"] = {**stat, "viewed": viewed} shout_dict["stat"] = {**stat, "viewed": viewed, "commented": stat.get("comments_count", 0)}
# Обработка main_topic и topics # Обработка main_topic и topics
topics = None topics = None

View File

@ -10,7 +10,6 @@ from cache.cache import (
) )
from orm.author import Author from orm.author import Author
from orm.topic import Topic from orm.topic import Topic
from orm.reaction import ReactionKind
from resolvers.stat import get_with_stat from resolvers.stat import get_with_stat
from services.auth import login_required from services.auth import login_required
from services.db import local_session from services.db import local_session
@ -113,7 +112,7 @@ async def get_topics_with_stats(limit=100, offset=0, community_id=None, by=None)
shouts_stats_query = f""" shouts_stats_query = f"""
SELECT st.topic, COUNT(DISTINCT s.id) as shouts_count SELECT st.topic, COUNT(DISTINCT s.id) as shouts_count
FROM shout_topic st FROM shout_topic st
JOIN shout s ON st.shout = s.id AND s.deleted_at IS NULL AND s.published_at IS NOT NULL JOIN shout s ON st.shout = s.id AND s.deleted_at IS NULL
WHERE st.topic IN ({",".join(map(str, topic_ids))}) WHERE st.topic IN ({",".join(map(str, topic_ids))})
GROUP BY st.topic GROUP BY st.topic
""" """
@ -122,35 +121,12 @@ async def get_topics_with_stats(limit=100, offset=0, community_id=None, by=None)
# Query for follower statistics for the selected topics # Query for follower statistics for the selected topics
followers_stats_query = f""" followers_stats_query = f"""
SELECT topic, COUNT(DISTINCT follower) as followers_count SELECT topic, COUNT(DISTINCT follower) as followers_count
FROM topic_followers tf FROM topic_followers
WHERE topic IN ({",".join(map(str, topic_ids))}) WHERE topic IN ({",".join(map(str, topic_ids))})
GROUP BY topic GROUP BY topic
""" """
followers_stats = {row[0]: row[1] for row in session.execute(text(followers_stats_query))} followers_stats = {row[0]: row[1] for row in session.execute(text(followers_stats_query))}
# Query for author statistics for the selected topics
authors_stats_query = f"""
SELECT st.topic, COUNT(DISTINCT sa.author) as authors_count
FROM shout_topic st
JOIN shout s ON st.shout = s.id AND s.deleted_at IS NULL AND s.published_at IS NOT NULL
JOIN shout_author sa ON sa.shout = s.id
WHERE st.topic IN ({",".join(map(str, topic_ids))})
GROUP BY st.topic
"""
authors_stats = {row[0]: row[1] for row in session.execute(text(authors_stats_query))}
# Query for comment statistics for the selected topics
comments_stats_query = f"""
SELECT st.topic, COUNT(DISTINCT r.id) as comments_count
FROM shout_topic st
JOIN shout s ON st.shout = s.id AND s.deleted_at IS NULL AND s.published_at IS NOT NULL
JOIN reaction r ON r.shout = s.id AND r.kind = '{ReactionKind.COMMENT.value}' AND r.deleted_at IS NULL
JOIN author a ON r.created_by = a.id AND a.deleted_at IS NULL
WHERE st.topic IN ({",".join(map(str, topic_ids))})
GROUP BY st.topic
"""
comments_stats = {row[0]: row[1] for row in session.execute(text(comments_stats_query))}
# Build the result with statistics attached # Build the result with statistics attached
result = [] result = []
for topic in topics: for topic in topics:
@ -158,8 +134,6 @@ async def get_topics_with_stats(limit=100, offset=0, community_id=None, by=None)
topic_dict["stat"] = { topic_dict["stat"] = {
"shouts": shouts_stats.get(topic.id, 0), "shouts": shouts_stats.get(topic.id, 0),
"followers": followers_stats.get(topic.id, 0), "followers": followers_stats.get(topic.id, 0),
"authors": authors_stats.get(topic.id, 0),
"comments": comments_stats.get(topic.id, 0),
} }
result.append(topic_dict) result.append(topic_dict)
@ -228,6 +202,23 @@ async def get_topics_all(_, _info):
return await get_all_topics() return await get_all_topics()
# Query for topics with pagination and statistics
@query.field("get_topics_paginated")
async def get_topics_paginated(_, _info, limit=100, offset=0, by=None):
"""
Returns a list of topics with pagination and statistics.
Args:
limit: Maximum number of topics to return
offset: Pagination offset
by: Optional sorting parameters
Returns:
list: Topics with their statistics
"""
return await get_topics_with_stats(limit, offset, None, by)
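A quick usage sketch of the resolver above, under the same direct-call assumption (None placeholders stand in for the GraphQL root and info arguments):

import asyncio

async def demo_topics():
    # First page of 20 topics, each with shouts/followers stats attached
    topics = await get_topics_paginated(None, None, limit=20, offset=0)
    for topic in topics:
        print(topic["slug"], topic["stat"]["shouts"], topic["stat"]["followers"])

# asyncio.run(demo_topics())  # assumes a configured database and cache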
# Query for topics by community # Query for topics by community
@query.field("get_topics_by_community") @query.field("get_topics_by_community")
async def get_topics_by_community(_, _info, community_id: int, limit=100, offset=0, by=None): async def get_topics_by_community(_, _info, community_id: int, limit=100, offset=0, by=None):

View File

@ -33,6 +33,7 @@ input DraftInput {
main_topic_id: Int # Changed from main_topic: Topic main_topic_id: Int # Changed from main_topic: Topic
media: [MediaItemInput] # Changed to use MediaItemInput media: [MediaItemInput] # Changed to use MediaItemInput
lead: String lead: String
description: String
subtitle: String subtitle: String
lang: String lang: String
seo: String seo: String
@ -92,14 +93,12 @@ input LoadShoutsOptions {
input ReactionBy { input ReactionBy {
shout: String shout: String
shout_id: Int
shouts: [String] shouts: [String]
search: String search: String
kinds: [ReactionKind] kinds: [ReactionKind]
reply_to: Int # filter reply_to: Int # filter
topic: String topic: String
created_by: Int created_by: Int
author_id: Int
author: String author: String
after: Int after: Int
sort: ReactionSort # sort sort: ReactionSort # sort

View File

@ -26,9 +26,6 @@ type Query {
load_shout_ratings(shout: Int!, limit: Int, offset: Int): [Reaction] load_shout_ratings(shout: Int!, limit: Int, offset: Int): [Reaction]
load_comment_ratings(comment: Int!, limit: Int, offset: Int): [Reaction] load_comment_ratings(comment: Int!, limit: Int, offset: Int): [Reaction]
# branched comments pagination
load_comments_branch(shout: Int!, parent_id: Int, limit: Int, offset: Int, sort: ReactionSort, children_limit: Int, children_offset: Int): [Reaction]
# reader # reader
get_shout(slug: String, shout_id: Int): Shout get_shout(slug: String, shout_id: Int): Shout
load_shouts_by(options: LoadShoutsOptions): [Shout] load_shouts_by(options: LoadShoutsOptions): [Shout]
@ -60,7 +57,7 @@ type Query {
get_topic(slug: String!): Topic get_topic(slug: String!): Topic
get_topics_all: [Topic] get_topics_all: [Topic]
get_topics_by_author(slug: String, user: String, author_id: Int): [Topic] get_topics_by_author(slug: String, user: String, author_id: Int): [Topic]
get_topics_by_community(community_id: Int!, limit: Int, offset: Int): [Topic] get_topics_by_community(slug: String, community_id: Int): [Topic]
# notifier # notifier
load_notifications(after: Int!, limit: Int, offset: Int): NotificationsResult! load_notifications(after: Int!, limit: Int, offset: Int): NotificationsResult!

View File

@ -55,7 +55,6 @@ type Reaction {
stat: Stat stat: Stat
oid: String oid: String
# old_thread: String # old_thread: String
first_replies: [Reaction]
} }
type MediaItem { type MediaItem {
@ -80,6 +79,7 @@ type Shout {
layout: String! layout: String!
lead: String lead: String
description: String
subtitle: String subtitle: String
lang: String lang: String
cover: String cover: String
@ -99,7 +99,6 @@ type Shout {
featured_at: Int featured_at: Int
deleted_at: Int deleted_at: Int
seo: String # generated if not set
version_of: Shout # TODO: use version_of somewhere version_of: Shout # TODO: use version_of somewhere
draft: Draft draft: Draft
media: [MediaItem] media: [MediaItem]
@ -107,22 +106,17 @@ type Shout {
score: Float score: Float
} }
type PublicationInfo {
id: Int!
slug: String!
published_at: Int
}
type Draft { type Draft {
id: Int! id: Int!
created_at: Int! created_at: Int!
created_by: Author! created_by: Author!
community: Community!
layout: String layout: String
slug: String slug: String
title: String title: String
subtitle: String subtitle: String
lead: String lead: String
description: String
body: String body: String
media: [MediaItem] media: [MediaItem]
cover: String cover: String
@ -135,14 +129,14 @@ type Draft {
deleted_at: Int deleted_at: Int
updated_by: Author updated_by: Author
deleted_by: Author deleted_by: Author
authors: [Author]! authors: [Author]
topics: [Topic]! topics: [Topic]
publication: PublicationInfo
} }
type Stat { type Stat {
rating: Int rating: Int
comments_count: Int commented: Int
viewed: Int viewed: Int
last_commented_at: Int last_commented_at: Int
} }

View File

@ -1 +0,0 @@
# This file makes services a Python package

View File

@ -53,71 +53,3 @@ async def notify_follower(follower: dict, author_id: int, action: str = "follow"
except Exception as e: except Exception as e:
# Log the error and re-raise it # Log the error and re-raise it
logger.error(f"Failed to publish to channel {channel_name}: {e}") logger.error(f"Failed to publish to channel {channel_name}: {e}")
async def notify_draft(draft_data, action: str = "publish"):
"""
Отправляет уведомление о публикации или обновлении черновика.
Функция гарантирует, что данные черновика сериализуются корректно, включая
связанные атрибуты (topics, authors).
Args:
draft_data (dict): Словарь с данными черновика. Должен содержать минимум id и title
action (str, optional): Действие ("publish", "update"). По умолчанию "publish"
Returns:
None
Examples:
>>> draft = {"id": 1, "title": "Тестовый черновик", "slug": "test-draft"}
>>> await notify_draft(draft, "publish")
"""
channel_name = "draft"
try:
# Make sure all required data is present
# and the object does not need access to detached attributes
if isinstance(draft_data, dict):
draft_payload = draft_data
else:
# If it is an ORM object, convert it to a dict with the needed attributes
draft_payload = {
"id": getattr(draft_data, "id", None),
"slug": getattr(draft_data, "slug", None),
"title": getattr(draft_data, "title", None),
"subtitle": getattr(draft_data, "subtitle", None),
"media": getattr(draft_data, "media", None),
"created_at": getattr(draft_data, "created_at", None),
"updated_at": getattr(draft_data, "updated_at", None),
}
# If related attributes were passed, add them
if hasattr(draft_data, "topics") and draft_data.topics is not None:
draft_payload["topics"] = [
{"id": t.id, "name": t.name, "slug": t.slug}
for t in draft_data.topics
]
if hasattr(draft_data, "authors") and draft_data.authors is not None:
draft_payload["authors"] = [
{
"id": a.id,
"name": a.name,
"slug": a.slug,
"pic": getattr(a, "pic", None),
}
for a in draft_data.authors
]
data = {"payload": draft_payload, "action": action}
# Save the notification
save_notification(action, channel_name, data.get("payload"))
# Publish to Redis
json_data = orjson.dumps(data)
if json_data:
await redis.publish(channel_name, json_data)
except Exception as e:
logger.error(f"Failed to publish to channel {channel_name}: {e}")

View File

@ -1,19 +0,0 @@
{
"include": ["."],
"exclude": ["**/node_modules", "**/__pycache__", "**/.*"],
"defineConstant": {
"DEBUG": true
},
"venvPath": ".",
"venv": ".venv",
"pythonVersion": "3.11",
"typeCheckingMode": "strict",
"reportMissingImports": true,
"reportMissingTypeStubs": false,
"reportUnknownMemberType": false,
"reportUnknownParameterType": false,
"reportUnknownVariableType": false,
"reportUnknownArgumentType": false,
"reportPrivateUsage": false,
"reportUntypedFunctionDecorator": false
}

View File

@ -1,15 +1,14 @@
from asyncio.log import logger from asyncio.log import logger
import httpx import httpx
from ariadne import MutationType, ObjectType, QueryType from ariadne import MutationType, QueryType
from services.db import create_table_if_not_exists, local_session from services.db import create_table_if_not_exists, local_session
from settings import AUTH_URL from settings import AUTH_URL
query = QueryType() query = QueryType()
mutation = MutationType() mutation = MutationType()
type_draft = ObjectType("Draft") resolvers = [query, mutation]
resolvers = [query, mutation, type_draft]
async def request_graphql_data(gql, url=AUTH_URL, headers=None): async def request_graphql_data(gql, url=AUTH_URL, headers=None):

View File

@ -2,7 +2,9 @@ import asyncio
import os import os
import time import time
from datetime import datetime, timedelta, timezone from datetime import datetime, timedelta, timezone
from typing import Dict, Optional from typing import Dict
import orjson
# ga # ga
from google.analytics.data_v1beta import BetaAnalyticsDataClient from google.analytics.data_v1beta import BetaAnalyticsDataClient
@ -18,39 +20,33 @@ from orm.author import Author
from orm.shout import Shout, ShoutAuthor, ShoutTopic from orm.shout import Shout, ShoutAuthor, ShoutTopic
from orm.topic import Topic from orm.topic import Topic
from services.db import local_session from services.db import local_session
from services.redis import redis
from utils.logger import root_logger as logger from utils.logger import root_logger as logger
GOOGLE_KEYFILE_PATH = os.environ.get("GOOGLE_KEYFILE_PATH", "/dump/google-service.json") GOOGLE_KEYFILE_PATH = os.environ.get("GOOGLE_KEYFILE_PATH", "/dump/google-service.json")
GOOGLE_PROPERTY_ID = os.environ.get("GOOGLE_PROPERTY_ID", "") GOOGLE_PROPERTY_ID = os.environ.get("GOOGLE_PROPERTY_ID", "")
VIEWS_FILEPATH = "/dump/views.json"
class ViewedStorage: class ViewedStorage:
"""
Класс для хранения и доступа к данным о просмотрах.
Использует Redis в качестве основного хранилища и Google Analytics для сбора новых данных.
"""
lock = asyncio.Lock() lock = asyncio.Lock()
precounted_by_slug = {}
views_by_shout = {} views_by_shout = {}
shouts_by_topic = {} shouts_by_topic = {}
shouts_by_author = {} shouts_by_author = {}
views = None views = None
period = 60 * 60 # every hour period = 60 * 60 # every hour
analytics_client: Optional[BetaAnalyticsDataClient] = None analytics_client: BetaAnalyticsDataClient | None = None
auth_result = None auth_result = None
running = False running = False
redis_views_key = None
last_update_timestamp = 0
start_date = datetime.now().strftime("%Y-%m-%d") start_date = datetime.now().strftime("%Y-%m-%d")
@staticmethod @staticmethod
async def init(): async def init():
"""Подключение к клиенту Google Analytics и загрузка данных о просмотрах из Redis""" """Подключение к клиенту Google Analytics с использованием аутентификации"""
self = ViewedStorage self = ViewedStorage
async with self.lock: async with self.lock:
# Load precounted views from Redis # Load precounted views from a JSON file
await self.load_views_from_redis() self.load_precounted_views()
os.environ.setdefault("GOOGLE_APPLICATION_CREDENTIALS", GOOGLE_KEYFILE_PATH) os.environ.setdefault("GOOGLE_APPLICATION_CREDENTIALS", GOOGLE_KEYFILE_PATH)
if GOOGLE_KEYFILE_PATH and os.path.isfile(GOOGLE_KEYFILE_PATH): if GOOGLE_KEYFILE_PATH and os.path.isfile(GOOGLE_KEYFILE_PATH):
@ -66,54 +62,40 @@ class ViewedStorage:
self.running = False self.running = False
@staticmethod @staticmethod
async def load_views_from_redis(): def load_precounted_views():
"""Загрузка предварительно подсчитанных просмотров из Redis""" """Загрузка предварительно подсчитанных просмотров из файла JSON"""
self = ViewedStorage self = ViewedStorage
viewfile_path = VIEWS_FILEPATH
if not os.path.exists(viewfile_path):
viewfile_path = os.path.join(os.path.curdir, "views.json")
if not os.path.exists(viewfile_path):
logger.warning(" * views.json not found")
return
# Connect to Redis if the connection is not established logger.info(f" * loading views from {viewfile_path}")
if not redis._client: try:
await redis.connect() start_date_int = os.path.getmtime(viewfile_path)
start_date_str = datetime.fromtimestamp(start_date_int).strftime("%Y-%m-%d")
# Get all migrated_views_* keys and find the most recent one self.start_date = start_date_str
keys = await redis.execute("KEYS", "migrated_views_*")
if not keys:
logger.warning(" * No migrated_views keys found in Redis")
return
# Keep only timestamp-format keys (excluding migrated_views_slugs)
timestamp_keys = [k for k in keys if k != "migrated_views_slugs"]
if not timestamp_keys:
logger.warning(" * No migrated_views timestamp keys found in Redis")
return
# Sort by creation time (embedded in the key name) and take the latest
timestamp_keys.sort()
latest_key = timestamp_keys[-1]
self.redis_views_key = latest_key
# Get the creation timestamp to set start_date
timestamp = await redis.execute("HGET", latest_key, "_timestamp")
if timestamp:
self.last_update_timestamp = int(timestamp)
timestamp_dt = datetime.fromtimestamp(int(timestamp))
self.start_date = timestamp_dt.strftime("%Y-%m-%d")
# If the data is from today, treat it as up to date
now_date = datetime.now().strftime("%Y-%m-%d") now_date = datetime.now().strftime("%Y-%m-%d")
if now_date == self.start_date:
logger.info(" * Views data is up to date!")
else:
logger.warning(f" * Views data is from {self.start_date}, may need update")
# Log how many records were loaded if now_date == self.start_date:
total_entries = await redis.execute("HGET", latest_key, "_total") logger.info(" * views data is up to date!")
if total_entries: else:
logger.info(f" * {total_entries} shouts with views loaded from Redis key: {latest_key}") logger.warn(f" * {viewfile_path} is too old: {self.start_date}")
with open(viewfile_path, "r") as file:
precounted_views = orjson.loads(file.read())
self.precounted_by_slug.update(precounted_views)
logger.info(f" * {len(precounted_views)} shouts with views was loaded.")
except Exception as e:
logger.error(f"precounted views loading error: {e}")
# noinspection PyTypeChecker # noinspection PyTypeChecker
@staticmethod @staticmethod
async def update_pages(): async def update_pages():
"""Запрос всех страниц от Google Analytics, отсортированных по количеству просмотров""" """Запрос всех страниц от Google Analytics, отсортрованных по количеству просмотров"""
self = ViewedStorage self = ViewedStorage
logger.info(" ⎧ views update from Google Analytics ---") logger.info(" ⎧ views update from Google Analytics ---")
if self.running: if self.running:
@ -158,40 +140,15 @@ class ViewedStorage:
self.running = False self.running = False
@staticmethod @staticmethod
async def get_shout(shout_slug="", shout_id=0) -> int: def get_shout(shout_slug="", shout_id=0) -> int:
""" """Получение метрики просмотров shout по slug или id."""
Получение метрики просмотров shout по slug или id.
Args:
shout_slug: Slug публикации
shout_id: ID публикации
Returns:
int: Количество просмотров
"""
self = ViewedStorage self = ViewedStorage
# Fetch data from Redis under the new storage scheme
if not redis._client:
await redis.connect()
fresh_views = self.views_by_shout.get(shout_slug, 0) fresh_views = self.views_by_shout.get(shout_slug, 0)
precounted_views = self.precounted_by_slug.get(shout_slug, 0)
# If an id is given, try Redis under the migrated_views_<timestamp> key return fresh_views + precounted_views
if shout_id and self.redis_views_key:
precounted_views = await redis.execute("HGET", self.redis_views_key, str(shout_id))
if precounted_views:
return fresh_views + int(precounted_views)
# If there is no id or no data, fall back to slug lookup in a separate hash
precounted_views = await redis.execute("HGET", "migrated_views_slugs", shout_slug)
if precounted_views:
return fresh_views + int(precounted_views)
return fresh_views
@staticmethod @staticmethod
async def get_shout_media(shout_slug) -> Dict[str, int]: def get_shout_media(shout_slug) -> Dict[str, int]:
"""Получение метрики воспроизведения shout по slug.""" """Получение метрики воспроизведения shout по slug."""
self = ViewedStorage self = ViewedStorage
@ -200,29 +157,23 @@ class ViewedStorage:
return self.views_by_shout.get(shout_slug, 0) return self.views_by_shout.get(shout_slug, 0)
@staticmethod @staticmethod
async def get_topic(topic_slug) -> int: def get_topic(topic_slug) -> int:
"""Получение суммарного значения просмотров темы.""" """Получение суммарного значения просмотров темы."""
self = ViewedStorage self = ViewedStorage
views_count = 0 return sum(self.views_by_shout.get(shout_slug, 0) for shout_slug in self.shouts_by_topic.get(topic_slug, []))
for shout_slug in self.shouts_by_topic.get(topic_slug, []):
views_count += await self.get_shout(shout_slug=shout_slug)
return views_count
@staticmethod @staticmethod
async def get_author(author_slug) -> int: def get_author(author_slug) -> int:
"""Получение суммарного значения просмотров автора.""" """Получение суммарного значения просмотров автора."""
self = ViewedStorage self = ViewedStorage
views_count = 0 return sum(self.views_by_shout.get(shout_slug, 0) for shout_slug in self.shouts_by_author.get(author_slug, []))
for shout_slug in self.shouts_by_author.get(author_slug, []):
views_count += await self.get_shout(shout_slug=shout_slug)
return views_count
@staticmethod @staticmethod
def update_topics(shout_slug): def update_topics(shout_slug):
"""Обновление счетчиков темы по slug shout""" """Обновление счетчиков темы по slug shout"""
self = ViewedStorage self = ViewedStorage
with local_session() as session: with local_session() as session:
# Define a helper function to avoid code duplication # Define a helper function to avoid code duplication
def update_groups(dictionary, key, value): def update_groups(dictionary, key, value):
dictionary[key] = list(set(dictionary.get(key, []) + [value])) dictionary[key] = list(set(dictionary.get(key, []) + [value]))

View File

@ -1,25 +0,0 @@
import pytest
from typing import Dict
@pytest.fixture
def oauth_settings() -> Dict[str, Dict[str, str]]:
"""Тестовые настройки OAuth"""
return {
"GOOGLE": {"id": "test_google_id", "key": "test_google_secret"},
"GITHUB": {"id": "test_github_id", "key": "test_github_secret"},
"FACEBOOK": {"id": "test_facebook_id", "key": "test_facebook_secret"},
}
@pytest.fixture
def frontend_url() -> str:
"""URL фронтенда для тестов"""
return "https://localhost:3000"
@pytest.fixture(autouse=True)
def mock_settings(monkeypatch, oauth_settings, frontend_url):
"""Подменяем настройки для тестов"""
monkeypatch.setattr("auth.oauth.OAUTH_CLIENTS", oauth_settings)
monkeypatch.setattr("auth.oauth.FRONTEND_URL", frontend_url)

View File

@ -1,222 +0,0 @@
import pytest
from unittest.mock import AsyncMock, MagicMock, patch
from starlette.responses import JSONResponse, RedirectResponse
from auth.oauth import get_user_profile, oauth_login, oauth_callback
# Patch settings for tests
with (
patch("auth.oauth.FRONTEND_URL", "https://localhost:3000"),
patch(
"auth.oauth.OAUTH_CLIENTS",
{
"GOOGLE": {"id": "test_google_id", "key": "test_google_secret"},
"GITHUB": {"id": "test_github_id", "key": "test_github_secret"},
"FACEBOOK": {"id": "test_facebook_id", "key": "test_facebook_secret"},
},
),
):
@pytest.fixture
def mock_request():
"""Фикстура для мока запроса"""
request = MagicMock()
request.session = {}
request.path_params = {}
request.query_params = {}
return request
@pytest.fixture
def mock_oauth_client():
"""Фикстура для мока OAuth клиента"""
client = AsyncMock()
client.authorize_redirect = AsyncMock()
client.authorize_access_token = AsyncMock()
client.get = AsyncMock()
return client
@pytest.mark.asyncio
async def test_get_user_profile_google():
"""Тест получения профиля из Google"""
client = AsyncMock()
token = {
"userinfo": {
"sub": "123",
"email": "test@gmail.com",
"name": "Test User",
"picture": "https://lh3.googleusercontent.com/photo=s96",
}
}
profile = await get_user_profile("google", client, token)
assert profile["id"] == "123"
assert profile["email"] == "test@gmail.com"
assert profile["name"] == "Test User"
assert profile["picture"] == "https://lh3.googleusercontent.com/photo=s600"
@pytest.mark.asyncio
async def test_get_user_profile_github():
"""Тест получения профиля из GitHub"""
client = AsyncMock()
client.get.side_effect = [
MagicMock(
json=lambda: {
"id": 456,
"login": "testuser",
"name": "Test User",
"avatar_url": "https://github.com/avatar.jpg",
}
),
MagicMock(
json=lambda: [
{"email": "other@github.com", "primary": False},
{"email": "test@github.com", "primary": True},
]
),
]
profile = await get_user_profile("github", client, {})
assert profile["id"] == "456"
assert profile["email"] == "test@github.com"
assert profile["name"] == "Test User"
assert profile["picture"] == "https://github.com/avatar.jpg"
@pytest.mark.asyncio
async def test_get_user_profile_facebook():
"""Тест получения профиля из Facebook"""
client = AsyncMock()
client.get.return_value = MagicMock(
json=lambda: {
"id": "789",
"name": "Test User",
"email": "test@facebook.com",
"picture": {"data": {"url": "https://facebook.com/photo.jpg"}},
}
)
profile = await get_user_profile("facebook", client, {})
assert profile["id"] == "789"
assert profile["email"] == "test@facebook.com"
assert profile["name"] == "Test User"
assert profile["picture"] == "https://facebook.com/photo.jpg"
@pytest.mark.asyncio
async def test_oauth_login_success(mock_request, mock_oauth_client):
"""Тест успешного начала OAuth авторизации"""
mock_request.path_params["provider"] = "google"
# Set up the mock for authorize_redirect
redirect_response = RedirectResponse(url="http://example.com")
mock_oauth_client.authorize_redirect.return_value = redirect_response
with patch("auth.oauth.oauth.create_client", return_value=mock_oauth_client):
response = await oauth_login(mock_request)
assert isinstance(response, RedirectResponse)
assert mock_request.session["provider"] == "google"
assert "code_verifier" in mock_request.session
assert "state" in mock_request.session
mock_oauth_client.authorize_redirect.assert_called_once()
@pytest.mark.asyncio
async def test_oauth_login_invalid_provider(mock_request):
"""Тест с неправильным провайдером"""
mock_request.path_params["provider"] = "invalid"
response = await oauth_login(mock_request)
assert isinstance(response, JSONResponse)
assert response.status_code == 400
assert "Invalid provider" in response.body.decode()
@pytest.mark.asyncio
async def test_oauth_callback_success(mock_request, mock_oauth_client):
"""Тест успешного OAuth callback"""
mock_request.session = {
"provider": "google",
"code_verifier": "test_verifier",
"state": "test_state",
}
mock_request.query_params["state"] = "test_state"
mock_oauth_client.authorize_access_token.return_value = {
"userinfo": {"sub": "123", "email": "test@gmail.com", "name": "Test User"}
}
with (
patch("auth.oauth.oauth.create_client", return_value=mock_oauth_client),
patch("auth.oauth.local_session") as mock_session,
patch("auth.oauth.TokenStorage.create_session", return_value="test_token"),
):
# Mock the database session
session = MagicMock()
session.query.return_value.filter.return_value.first.return_value = None
mock_session.return_value.__enter__.return_value = session
response = await oauth_callback(mock_request)
assert isinstance(response, RedirectResponse)
assert response.status_code == 307
assert "auth/success" in response.headers["location"]
# Check the cookies
cookies = response.headers.getlist("set-cookie")
assert any("session_token=test_token" in cookie for cookie in cookies)
assert any("httponly" in cookie.lower() for cookie in cookies)
assert any("secure" in cookie.lower() for cookie in cookies)
# Check the session was cleaned up
assert "code_verifier" not in mock_request.session
assert "provider" not in mock_request.session
assert "state" not in mock_request.session
@pytest.mark.asyncio
async def test_oauth_callback_invalid_state(mock_request):
"""Тест с неправильным state параметром"""
mock_request.session = {"provider": "google", "state": "correct_state"}
mock_request.query_params["state"] = "wrong_state"
response = await oauth_callback(mock_request)
assert isinstance(response, JSONResponse)
assert response.status_code == 400
assert "Invalid state" in response.body.decode()
@pytest.mark.asyncio
async def test_oauth_callback_existing_user(mock_request, mock_oauth_client):
"""Тест OAuth callback с существующим пользователем"""
mock_request.session = {
"provider": "google",
"code_verifier": "test_verifier",
"state": "test_state",
}
mock_request.query_params["state"] = "test_state"
mock_oauth_client.authorize_access_token.return_value = {
"userinfo": {"sub": "123", "email": "test@gmail.com", "name": "Test User"}
}
with (
patch("auth.oauth.oauth.create_client", return_value=mock_oauth_client),
patch("auth.oauth.local_session") as mock_session,
patch("auth.oauth.TokenStorage.create_session", return_value="test_token"),
):
# Mock an existing user
existing_user = MagicMock()
session = MagicMock()
session.query.return_value.filter.return_value.first.return_value = existing_user
mock_session.return_value.__enter__.return_value = session
response = await oauth_callback(mock_request)
assert isinstance(response, RedirectResponse)
assert response.status_code == 307
# Check the existing user was updated
assert existing_user.name == "Test User"
assert existing_user.oauth == "google:123"
assert existing_user.email_verified is True

View File

@ -1,9 +0,0 @@
"""Тестовые настройки для OAuth"""
FRONTEND_URL = "https://localhost:3000"
OAUTH_CLIENTS = {
"GOOGLE": {"id": "test_google_id", "key": "test_google_secret"},
"GITHUB": {"id": "test_github_id", "key": "test_github_secret"},
"FACEBOOK": {"id": "test_facebook_id", "key": "test_facebook_secret"},
}

View File

@ -1,7 +1,17 @@
import asyncio import asyncio
import os
import pytest import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from starlette.testclient import TestClient
from main import app
from services.db import Base
from services.redis import redis from services.redis import redis
from tests.test_config import get_test_client
# Use SQLite for testing
TEST_DB_URL = "sqlite:///test.db"
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
@ -13,36 +23,38 @@ def event_loop():
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def test_app(): def test_engine():
"""Create a test client and session factory.""" """Create a test database engine."""
client, SessionLocal = get_test_client() engine = create_engine(TEST_DB_URL)
return client, SessionLocal Base.metadata.create_all(engine)
yield engine
Base.metadata.drop_all(engine)
os.remove("test.db")
@pytest.fixture @pytest.fixture
def db_session(test_app): def db_session(test_engine):
"""Create a new database session for a test.""" """Create a new database session for a test."""
_, SessionLocal = test_app connection = test_engine.connect()
session = SessionLocal() transaction = connection.begin()
session = Session(bind=connection)
yield session yield session
session.rollback()
session.close() session.close()
transaction.rollback()
connection.close()
@pytest.fixture
def test_client(test_app):
"""Get the test client."""
client, _ = test_app
return client
@pytest.fixture @pytest.fixture
async def redis_client(): async def redis_client():
"""Create a test Redis client.""" """Create a test Redis client."""
await redis.connect() await redis.connect()
await redis.flushall() # Flush Redis before each test
yield redis yield redis
await redis.flushall() # Flush after the test
await redis.disconnect() await redis.disconnect()
@pytest.fixture
def test_client():
"""Create a TestClient instance."""
return TestClient(app)
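A minimal sketch of a test consuming the db_session fixture above; the Author model import path appears elsewhere in this diff, while its required fields are an assumption for illustration:

from orm.author import Author

def test_create_author(db_session):
    # db_session comes from the fixture above and is rolled back after the test
    author = Author(name="Test Author", slug="test-author")
    db_session.add(author)
    db_session.flush()
    found = db_session.query(Author).filter(Author.slug == "test-author").first()
    assert found is not None
    assert found.name == "Test Author"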

View File

@ -1,67 +0,0 @@
"""
Test configuration
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import StaticPool
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.testclient import TestClient
# Use in-memory SQLite for tests
TEST_DB_URL = "sqlite:///:memory:"
class DatabaseMiddleware(BaseHTTPMiddleware):
"""Middleware для внедрения сессии БД"""
def __init__(self, app, session_maker):
super().__init__(app)
self.session_maker = session_maker
async def dispatch(self, request, call_next):
session = self.session_maker()
request.state.db = session
try:
response = await call_next(request)
finally:
session.close()
return response
def create_test_app():
"""Create a test Starlette application."""
from services.db import Base
# Create the engine and tables
engine = create_engine(
TEST_DB_URL,
connect_args={"check_same_thread": False},
poolclass=StaticPool,
echo=False,
)
Base.metadata.drop_all(bind=engine)
Base.metadata.create_all(bind=engine)
# Create the session factory
SessionLocal = sessionmaker(bind=engine)
# Create the session middleware
middleware = [Middleware(DatabaseMiddleware, session_maker=SessionLocal)]
# Create the test application
app = Starlette(
debug=True,
middleware=middleware,
routes=[], # test routes can be added here if needed
)
return app, SessionLocal
def get_test_client():
"""Get a test client with initialized database."""
app, SessionLocal = create_test_app()
return TestClient(app), SessionLocal

View File

@ -53,11 +53,7 @@ async def test_create_reaction(test_client, db_session, test_setup):
} }
""", """,
"variables": { "variables": {
"reaction": { "reaction": {"shout": test_setup["shout"].id, "kind": ReactionKind.LIKE.value, "body": "Great post!"}
"shout": test_setup["shout"].id,
"kind": ReactionKind.LIKE.value,
"body": "Great post!",
}
}, },
}, },
) )

70
tests/test_validations.py Normal file
View File

@ -0,0 +1,70 @@
from datetime import datetime, timedelta
import pytest
from pydantic import ValidationError
from auth.validations import (
AuthInput,
AuthResponse,
TokenPayload,
UserRegistrationInput,
)
class TestAuthValidations:
def test_auth_input(self):
"""Test basic auth input validation"""
# Valid case
auth = AuthInput(user_id="123", username="testuser", token="1234567890abcdef1234567890abcdef")
assert auth.user_id == "123"
assert auth.username == "testuser"
# Invalid cases
with pytest.raises(ValidationError):
AuthInput(user_id="", username="test", token="x" * 32)
with pytest.raises(ValidationError):
AuthInput(user_id="123", username="t", token="x" * 32)
def test_user_registration(self):
"""Test user registration validation"""
# Valid case
user = UserRegistrationInput(email="test@example.com", password="SecurePass123!", name="Test User")
assert user.email == "test@example.com"
assert user.name == "Test User"
# Test email validation
with pytest.raises(ValidationError) as exc:
UserRegistrationInput(email="invalid-email", password="SecurePass123!", name="Test")
assert "Invalid email format" in str(exc.value)
# Test password validation
with pytest.raises(ValidationError) as exc:
UserRegistrationInput(email="test@example.com", password="weak", name="Test")
assert "String should have at least 8 characters" in str(exc.value)
def test_token_payload(self):
"""Test token payload validation"""
now = datetime.utcnow()
exp = now + timedelta(hours=1)
payload = TokenPayload(user_id="123", username="testuser", exp=exp, iat=now)
assert payload.user_id == "123"
assert payload.username == "testuser"
assert payload.scopes == [] # Default empty list
def test_auth_response(self):
"""Test auth response validation"""
# Success case
success_resp = AuthResponse(success=True, token="valid_token", user={"id": "123", "name": "Test"})
assert success_resp.success is True
assert success_resp.token == "valid_token"
# Error case
error_resp = AuthResponse(success=False, error="Invalid credentials")
assert error_resp.success is False
assert error_resp.error == "Invalid credentials"
# Invalid case: the required token field is missing when success=True
with pytest.raises(ValidationError):
AuthResponse(success=True, user={"id": "123", "name": "Test"})

View File

@ -1 +0,0 @@

View File

@ -1,38 +0,0 @@
"""
Module for processing HTML fragments
"""
def wrap_html_fragment(fragment: str) -> str:
"""
Wraps an HTML fragment in a full HTML structure for correct processing.
Args:
fragment: HTML fragment to process
Returns:
str: A complete HTML document
Example:
>>> wrap_html_fragment("<p>Paragraph text</p>")
'<!DOCTYPE html><html><head><meta charset="utf-8"></head><body><p>Paragraph text</p></body></html>'
"""
if not fragment or not fragment.strip():
return fragment
# Check whether the content is already a full HTML document
is_full_html = fragment.strip().startswith('<!DOCTYPE') or fragment.strip().startswith('<html')
# If it is a fragment, wrap it in a full HTML document
if not is_full_html:
return f"""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title></title>
</head>
<body>
{fragment}
</body>
</html>"""
return fragment