2024-02-21 19:14:58 +03:00
parent 88cd6e1060
commit 214af0cf51
33 changed files with 750 additions and 773 deletions

View File

@@ -9,31 +9,31 @@ from services.logger import root_logger as logger
async def request_data(gql, headers=None):
if headers is None:
headers = {"Content-Type": "application/json"}
headers = {'Content-Type': 'application/json'}
try:
async with httpx.AsyncClient() as client:
response = await client.post(AUTH_URL, json=gql, headers=headers)
if response.status_code == 200:
data = response.json()
errors = data.get("errors")
errors = data.get('errors')
if errors:
logger.error(f"HTTP Errors: {errors}")
logger.error(f'HTTP Errors: {errors}')
else:
return data
except Exception as e:
# Handling and logging exceptions during authentication check
logger.error(f"request_data error: {e}")
logger.error(f'request_data error: {e}')
return None
# Create a cache region with a 30-second TTL
region = make_region().configure('dogpile.cache.memory', expiration_time=30)
# Key function for caching
def auth_cache_key(req):
token = req.headers.get("Authorization")
return f"auth_token:{token}"
token = req.headers.get('Authorization')
return f'auth_token:{token}'
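The body of `cache_auth_request` is elided by the next hunk; as a rough sketch of how the 30-second region and the key function above could be wired together (illustrative only, not the commit's actual implementation):

```python
# Hypothetical sketch: serve a cached token check through the dogpile region.
# `validate` stands in for the real network-backed check; it is not in this commit.
def cached_check_auth(req, validate):
    key = auth_cache_key(req)  # 'auth_token:<Authorization header>'
    # get_or_create returns the cached value while the 30s TTL holds,
    # otherwise it invokes the creator and stores the result.
    return region.get_or_create(key, lambda: validate(req))
```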
# Decorator that caches the token-validation request
@@ -55,32 +55,27 @@ def cache_auth_request(f):
# Modified authentication check function with caching
@cache_auth_request
async def check_auth(req):
token = req.headers.get("Authorization")
user_id = ""
token = req.headers.get('Authorization')
user_id = ''
user_roles = []
if token:
try:
# Logging the authentication token
logger.debug(f"{token}")
query_name = "validate_jwt_token"
operation = "ValidateToken"
variables = {
"params": {
"token_type": "access_token",
"token": token,
}
}
logger.debug(f'{token}')
query_name = 'validate_jwt_token'
operation = 'ValidateToken'
variables = {'params': {'token_type': 'access_token', 'token': token}}
gql = {
"query": f"query {operation}($params: ValidateJWTTokenInput!) {{ {query_name}(params: $params) {{ is_valid claims }} }}",
"variables": variables,
"operationName": operation,
'query': f'query {operation}($params: ValidateJWTTokenInput!) {{ {query_name}(params: $params) {{ is_valid claims }} }}',
'variables': variables,
'operationName': operation,
}
data = await request_data(gql)
if data:
user_data = data.get("data", {}).get(query_name, {}).get("claims", {})
user_id = user_data.get("sub")
user_roles = user_data.get("allowed_roles")
user_data = data.get('data', {}).get(query_name, {}).get('claims', {})
user_id = user_data.get('sub')
user_roles = user_data.get('allowed_roles')
except Exception as e:
import traceback
@@ -92,41 +87,41 @@ async def check_auth(req):
async def add_user_role(user_id):
logger.info(f"add author role for user_id: {user_id}")
query_name = "_update_user"
operation = "UpdateUserRoles"
logger.info(f'add author role for user_id: {user_id}')
query_name = '_update_user'
operation = 'UpdateUserRoles'
headers = {
"Content-Type": "application/json",
"x-authorizer-admin-secret": ADMIN_SECRET,
'Content-Type': 'application/json',
'x-authorizer-admin-secret': ADMIN_SECRET,
}
variables = {"params": {"roles": "author, reader", "id": user_id}}
variables = {'params': {'roles': 'author, reader', 'id': user_id}}
gql = {
"query": f"mutation {operation}($params: UpdateUserInput!) {{ {query_name}(params: $params) {{ id roles }} }}",
"variables": variables,
"operationName": operation,
'query': f'mutation {operation}($params: UpdateUserInput!) {{ {query_name}(params: $params) {{ id roles }} }}',
'variables': variables,
'operationName': operation,
}
data = await request_data(gql, headers)
if data:
user_id = data.get("data", {}).get(query_name, {}).get("id")
user_id = data.get('data', {}).get(query_name, {}).get('id')
return user_id
def login_required(f):
@wraps(f)
async def decorated_function(*args, **kwargs):
user_id = ""
user_id = ''
user_roles = []
info = args[1]
try:
req = info.context.get("request")
req = info.context.get('request')
[user_id, user_roles] = await check_auth(req)
except Exception as e:
logger.error(f"Failed to authenticate user: {e}")
logger.error(f'Failed to authenticate user: {e}')
if user_id:
logger.info(f" got {user_id} roles: {user_roles}")
info.context["user_id"] = user_id.strip()
info.context["roles"] = user_roles
logger.info(f' got {user_id} roles: {user_roles}')
info.context['user_id'] = user_id.strip()
info.context['roles'] = user_roles
return await f(*args, **kwargs)
return decorated_function
@@ -135,7 +130,7 @@ def login_required(f):
def auth_request(f):
@wraps(f)
async def decorated_function(*args, **kwargs):
user_id = ""
user_id = ''
user_roles = []
req = {}
try:
@@ -145,11 +140,11 @@ def auth_request(f):
import traceback
traceback.print_exc()
logger.error(f"Failed to authenticate user: {args} {e}")
logger.error(f'Failed to authenticate user: {args} {e}')
if user_id:
logger.info(f" got {user_id} roles: {user_roles}")
req["user_id"] = user_id.strip()
req["roles"] = user_roles
logger.info(f' got {user_id} roles: {user_roles}')
req['user_id'] = user_id.strip()
req['roles'] = user_roles
return await f(*args, **kwargs)
return decorated_function
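For context, a GraphQL resolver guarded by `login_required` might look like the sketch below; the resolver name and return value are illustrative, and `info` must be the second positional argument, since the decorator reads `args[1]`.

```python
# Hypothetical resolver protected by login_required.
@login_required
async def resolve_my_profile(_, info):
    # check_auth has already populated the context when authentication succeeded
    user_id = info.context.get('user_id', '')
    roles = info.context.get('roles', [])
    return {'user_id': user_id, 'roles': roles}
```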

View File

@@ -14,11 +14,11 @@ from services.logger import root_logger as logger
from settings import DB_URL
# Create a cache region with a 300-second TTL
cache_region = make_region().configure('dogpile.cache.memory', expiration_time=300)
# SQLAlchemy database connection
engine = create_engine(DB_URL, echo=False, pool_size=10, max_overflow=20)
T = TypeVar("T")
T = TypeVar('T')
REGISTRY: Dict[str, type] = {}
Base = declarative_base()
@@ -29,9 +29,9 @@ def profile_sqlalchemy_queries(threshold=0.1):
def wrapper(*args, **kw):
elapsed, stat_loader, result = _profile(fn, threshold, *args, **kw)
if elapsed is not None:
print(f"Query took {elapsed:.3f} seconds to execute.")
print(f'Query took {elapsed:.3f} seconds to execute.')
stats = stat_loader()
stats.sort_stats("cumulative")
stats.sort_stats('cumulative')
stats.print_stats()
return result
@@ -52,14 +52,14 @@ def _profile(fn, threshold, *args, **kw):
# Interceptors for the SQLAlchemy query log
@event.listens_for(Engine, 'before_cursor_execute')
def before_cursor_execute(conn, cursor, statement, parameters, context, executemany):
conn._query_start_time = time.time()
@event.listens_for(Engine, "after_cursor_execute")
@event.listens_for(Engine, 'after_cursor_execute')
def after_cursor_execute(conn, cursor, statement, parameters, context, executemany):
if hasattr(conn, "_query_start_time"):
if hasattr(conn, '_query_start_time'):
elapsed = time.time() - conn._query_start_time
del conn._query_start_time
if elapsed > 0.2: # Adjust threshold as needed
@@ -71,7 +71,7 @@ def after_cursor_execute(conn, cursor, statement, parameters, context, executema
profiler(statement, parameters)
def local_session(src=""):
def local_session(src=''):
return Session(bind=engine, expire_on_commit=False)
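`local_session` hands back a plain `Session`; since SQLAlchemy 1.4 a `Session` is also a context manager, which is how the rest of this commit scopes it. A minimal usage sketch with a trivial query:

```python
# Minimal usage sketch: the session is closed when the block exits.
from sqlalchemy import text

with local_session() as session:
    one = session.execute(text('SELECT 1')).scalar()
```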
@@ -82,7 +82,7 @@ class Base(declarative_base()):
__init__: Callable
__allow_unmapped__ = True
__abstract__ = True
__table_args__ = {"extend_existing": True}
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
@@ -91,12 +91,12 @@ class Base(declarative_base()):
def dict(self) -> Dict[str, Any]:
column_names = self.__table__.columns.keys()
if "_sa_instance_state" in column_names:
column_names.remove("_sa_instance_state")
if '_sa_instance_state' in column_names:
column_names.remove('_sa_instance_state')
try:
return {c: getattr(self, c) for c in column_names}
except Exception as e:
logger.error(f"Error occurred while converting object to dictionary: {e}")
logger.error(f'Error occurred while converting object to dictionary: {e}')
return {}
def update(self, values: Dict[str, Any]) -> None:

View File

@@ -29,19 +29,19 @@ def apply_diff(original, diff):
The modified string.
"""
result = []
pattern = re.compile(r"^(\+|-) ")
pattern = re.compile(r'^(\+|-) ')
for line in diff:
match = pattern.match(line)
if match:
op = match.group(1)
content = line[2:]
if op == "+":
if op == '+':
result.append(content)
elif op == "-":
elif op == '-':
# Ignore deleted lines
pass
else:
result.append(line)
return " ".join(result)
return ' '.join(result)
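Since the result is joined with spaces, the function effectively reassembles a word-level diff rather than a line-level one. A quick illustration with hypothetical input:

```python
# '+ ' items are kept, '- ' items dropped, unmarked items pass through.
diff = ['Hello', '- cruel', '+ beautiful', 'world']
assert apply_diff('', diff) == 'Hello beautiful world'
```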

View File

@@ -12,48 +12,48 @@ from services.rediscache import redis
from services.viewed import ViewedStorage
@event.listens_for(Author, "after_insert")
@event.listens_for(Author, "after_update")
@event.listens_for(Author, 'after_insert')
@event.listens_for(Author, 'after_update')
def after_author_update(mapper, connection, author: Author):
redis_key = f"user:{author.user}:author"
redis_key = f'user:{author.user}:author'
asyncio.create_task(
redis.execute(
"set",
'set',
redis_key,
json.dumps(
{
"id": author.id,
"name": author.name,
"slug": author.slug,
"pic": author.pic,
'id': author.id,
'name': author.name,
'slug': author.slug,
'pic': author.pic,
}
),
)
)
@event.listens_for(TopicFollower, "after_insert")
@event.listens_for(TopicFollower, 'after_insert')
def after_topic_follower_insert(mapper, connection, target: TopicFollower):
asyncio.create_task(
handle_topic_follower_change(connection, target.topic, target.follower, True)
)
@event.listens_for(TopicFollower, "after_delete")
@event.listens_for(TopicFollower, 'after_delete')
def after_topic_follower_delete(mapper, connection, target: TopicFollower):
asyncio.create_task(
handle_topic_follower_change(connection, target.topic, target.follower, False)
)
@event.listens_for(AuthorFollower, "after_insert")
@event.listens_for(AuthorFollower, 'after_insert')
def after_author_follower_insert(mapper, connection, target: AuthorFollower):
asyncio.create_task(
handle_author_follower_change(connection, target.author, target.follower, True)
)
@event.listens_for(AuthorFollower, "after_delete")
@event.listens_for(AuthorFollower, 'after_delete')
def after_author_follower_delete(mapper, connection, target: AuthorFollower):
asyncio.create_task(
handle_author_follower_change(connection, target.author, target.follower, False)
@@ -63,26 +63,26 @@ def after_author_follower_delete(mapper, connection, target: AuthorFollower):
async def update_follows_for_user(
connection, user_id, entity_type, entity: dict, is_insert
):
redis_key = f"user:{user_id}:follows"
redis_key = f'user:{user_id}:follows'
follows_str = await redis.get(redis_key)
if follows_str:
follows = json.loads(follows_str)
else:
follows = {
"topics": [],
"authors": [],
"communities": [
{"slug": "discours", "name": "Дискурс", "id": 1, "desc": ""}
'topics': [],
'authors': [],
'communities': [
{'slug': 'discours', 'name': 'Дискурс', 'id': 1, 'desc': ''}
],
}
if is_insert:
follows[f"{entity_type}s"].append(entity)
follows[f'{entity_type}s'].append(entity)
else:
# Remove the entity from follows
follows[f"{entity_type}s"] = [
e for e in follows[f"{entity_type}s"] if e["id"] != entity["id"]
follows[f'{entity_type}s'] = [
e for e in follows[f'{entity_type}s'] if e['id'] != entity['id']
]
await redis.execute("set", redis_key, json.dumps(follows))
await redis.execute('set', redis_key, json.dumps(follows))
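The cached value is plain JSON, so reading it back elsewhere is symmetric; a minimal sketch, assuming the module's `redis` wrapper and `json` import (the helper name is hypothetical):

```python
# Hypothetical read of the per-user follows cache maintained above.
async def get_cached_follows(user_id: str) -> dict:
    follows_str = await redis.get(f'user:{user_id}:follows')
    # Fall back to empty lists when the cache has not been primed yet.
    return json.loads(follows_str) if follows_str else {'topics': [], 'authors': [], 'communities': []}
```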
async def handle_author_follower_change(connection, author_id, follower_id, is_insert):
@@ -93,17 +93,24 @@ async def handle_author_follower_change(connection, author_id, follower_id, is_i
q
).first()
author.stat = {
"shouts": shouts_stat,
"viewed": await ViewedStorage.get_author(author.slug),
"followers": followers_stat,
"followings": followings_stat,
'shouts': shouts_stat,
'viewed': await ViewedStorage.get_author(author.slug),
'followers': followers_stat,
'followings': followings_stat,
}
follower = await conn.execute(
select(Author).filter(Author.id == follower_id)
).first()
if follower and author:
await update_follows_for_user(
connection, follower.user, "author", author.dict(), is_insert
connection, follower.user, 'author', {
"id": author.id,
"name": author.name,
"slug": author.slug,
"pic": author.pic,
"bio": author.bio,
"stat": author.stat
}, is_insert
)
@@ -115,30 +122,37 @@ async def handle_topic_follower_change(connection, topic_id, follower_id, is_ins
q
).first()
topic.stat = {
"shouts": shouts_stat,
"authors": authors_stat,
"followers": followers_stat,
"viewed": await ViewedStorage.get_topic(topic.slug),
'shouts': shouts_stat,
'authors': authors_stat,
'followers': followers_stat,
'viewed': await ViewedStorage.get_topic(topic.slug),
}
follower = connection.execute(
select(Author).filter(Author.id == follower_id)
).first()
if follower and topic:
await update_follows_for_user(
connection, follower.user, "topic", topic.dict(), is_insert
connection, follower.user, 'topic', {
"id": topic.id,
"title": topic.title,
"slug": topic.slug,
"body": topic.body,
"stat": topic.stat
}, is_insert
)
BATCH_SIZE = 33
class FollowsCached:
lock = asyncio.Lock()
@staticmethod
async def update_cache():
with local_session() as session:
q = select(Author)
q = add_author_stat_columns(q)
authors = session.execute(q)
for i in range(0, len(authors), BATCH_SIZE):
batch_authors = authors[i : i + BATCH_SIZE]
await asyncio.gather(
*[
@@ -149,24 +163,26 @@ class FollowsCached:
@staticmethod
async def update_author_cache(author: Author):
redis_key = f"user:{author.user}:author"
redis_key = f'user:{author.user}:author'
if isinstance(author, Author):
await redis.execute(
"set",
'set',
redis_key,
json.dumps(
{
"id": author.id,
"name": author.name,
"slug": author.slug,
"pic": author.pic,
'id': author.id,
'name': author.name,
'slug': author.slug,
'pic': author.pic,
'bio': author.bio,
'stat': author.stat
}
),
)
follows = await get_author_follows(None, None, user=author.user)
if isinstance(follows, dict):
redis_key = f"user:{author.user}:follows"
await redis.execute("set", redis_key, json.dumps(follows))
redis_key = f'user:{author.user}:follows'
await redis.execute('set', redis_key, json.dumps(follows))
@staticmethod
async def worker():
@@ -178,7 +194,7 @@ class FollowsCached:
await asyncio.sleep(10 * 60 * 60)
except asyncio.CancelledError:
# Handle cancellation due to SIGTERM
logger.info("Cancellation requested. Cleaning up...")
logger.info('Cancellation requested. Cleaning up...')
# Perform any necessary cleanup before exiting the loop
break
except Exception as exc:

View File

@@ -3,45 +3,45 @@ import colorlog
# Define the color scheme
color_scheme = {
"DEBUG": "light_black",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red,bg_white",
'DEBUG': 'light_black',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red,bg_white',
}
# Define secondary log colors
secondary_colors = {
"log_name": {"DEBUG": "blue"},
"asctime": {"DEBUG": "cyan"},
"process": {"DEBUG": "purple"},
"module": {"DEBUG": "light_black,bg_blue"},
'log_name': {'DEBUG': 'blue'},
'asctime': {'DEBUG': 'cyan'},
'process': {'DEBUG': 'purple'},
'module': {'DEBUG': 'light_black,bg_blue'},
}
# Define the log format string
fmt_string = "%(log_color)s%(levelname)s: %(log_color)s[%(module)s]%(reset)s %(white)s%(message)s"
fmt_string = '%(log_color)s%(levelname)s: %(log_color)s[%(module)s]%(reset)s %(white)s%(message)s'
# Define formatting configuration
fmt_config = {
"log_colors": color_scheme,
"secondary_log_colors": secondary_colors,
"style": "%",
"reset": True,
'log_colors': color_scheme,
'secondary_log_colors': secondary_colors,
'style': '%',
'reset': True,
}
class MultilineColoredFormatter(colorlog.ColoredFormatter):
def format(self, record):
# Check if the message is multiline
if record.getMessage() and "\n" in record.getMessage():
if record.getMessage() and '\n' in record.getMessage():
# Split the message into lines
lines = record.getMessage().split("\n")
lines = record.getMessage().split('\n')
formatted_lines = []
for line in lines:
# Format each line with the provided format
formatted_lines.append(super().format(record))
# Join the formatted lines
return "\n".join(formatted_lines)
return '\n'.join(formatted_lines)
else:
# If not multiline or no message, use the default formatting
return super().format(record)
@@ -55,7 +55,7 @@ stream = logging.StreamHandler()
stream.setFormatter(formatter)
def get_colorful_logger(name="main"):
def get_colorful_logger(name='main'):
# Create and configure the logger
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
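Usage follows the standard `logging` pattern; a minimal sketch:

```python
# The colorized handler is attached inside get_colorful_logger.
log = get_colorful_logger('auth')
log.info('service started')
log.error('something went wrong')
```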

View File

@@ -3,43 +3,43 @@ import json
from services.rediscache import redis
async def notify_reaction(reaction, action: str = "create"):
channel_name = "reaction"
data = {"payload": reaction, "action": action}
async def notify_reaction(reaction, action: str = 'create'):
channel_name = 'reaction'
data = {'payload': reaction, 'action': action}
try:
await redis.publish(channel_name, json.dumps(data))
except Exception as e:
print(f"[services.notify] Failed to publish to channel {channel_name}: {e}")
print(f'[services.notify] Failed to publish to channel {channel_name}: {e}')
async def notify_shout(shout, action: str = "update"):
channel_name = "shout"
data = {"payload": shout, "action": action}
async def notify_shout(shout, action: str = 'update'):
channel_name = 'shout'
data = {'payload': shout, 'action': action}
try:
await redis.publish(channel_name, json.dumps(data))
except Exception as e:
print(f"[services.notify] Failed to publish to channel {channel_name}: {e}")
print(f'[services.notify] Failed to publish to channel {channel_name}: {e}')
async def notify_follower(follower: dict, author_id: int, action: str = "follow"):
channel_name = f"follower:{author_id}"
async def notify_follower(follower: dict, author_id: int, action: str = 'follow'):
channel_name = f'follower:{author_id}'
try:
# Simplify dictionary before publishing
simplified_follower = {k: follower[k] for k in ["id", "name", "slug", "pic"]}
simplified_follower = {k: follower[k] for k in ['id', 'name', 'slug', 'pic']}
data = {"payload": simplified_follower, "action": action}
data = {'payload': simplified_follower, 'action': action}
# Convert data to JSON string
json_data = json.dumps(data)
# Ensure the data is not empty before publishing
if not json_data:
raise ValueError("Empty data to publish.")
raise ValueError('Empty data to publish.')
# Use the 'await' keyword when publishing
await redis.publish(channel_name, json_data)
except Exception as e:
# Log the error and re-raise it
print(f"[services.notify] Failed to publish to channel {channel_name}: {e}")
print(f'[services.notify] Failed to publish to channel {channel_name}: {e}')
raise
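A caller must pass a follower dict carrying at least the four keys the function projects; a sketch with hypothetical values, to be awaited from async code:

```python
# notify_follower keeps only id, name, slug and pic before publishing.
await notify_follower(
    {'id': 1, 'name': 'Anna', 'slug': 'anna', 'pic': ''},
    author_id=42,
    action='follow',
)
```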

View File

@@ -20,11 +20,11 @@ class RedisCache:
async def execute(self, command, *args, **kwargs):
if self._client:
try:
logger.debug(f"{command} {args} {kwargs}")
logger.debug(f'{command} {args} {kwargs}')
for arg in args:
if isinstance(arg, dict):
if arg.get("_sa_instance_state"):
del arg["_sa_instance_state"]
if arg.get('_sa_instance_state'):
del arg['_sa_instance_state']
r = await self._client.execute_command(command, *args, **kwargs)
logger.debug(type(r))
logger.debug(r)
@@ -55,4 +55,4 @@ class RedisCache:
redis = RedisCache()
__all__ = ["redis"]
__all__ = ['redis']
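Commands pass straight through to redis-py's `execute_command`, so callers supply raw command names and positional arguments; a minimal sketch:

```python
# Hypothetical calls against the shared wrapper instance.
await redis.execute('SET', 'greeting', 'hello')
value = await redis.execute('GET', 'greeting')
```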

View File

@@ -7,69 +7,60 @@ from opensearchpy import OpenSearch
from services.logger import root_logger as logger
from services.rediscache import redis
ELASTIC_HOST = os.environ.get("ELASTIC_HOST", "").replace("https://", "")
ELASTIC_USER = os.environ.get("ELASTIC_USER", "")
ELASTIC_PASSWORD = os.environ.get("ELASTIC_PASSWORD", "")
ELASTIC_PORT = os.environ.get("ELASTIC_PORT", 9200)
ELASTIC_AUTH = f"{ELASTIC_USER}:{ELASTIC_PASSWORD}" if ELASTIC_USER else ""
ELASTIC_HOST = os.environ.get('ELASTIC_HOST', '').replace('https://', '')
ELASTIC_USER = os.environ.get('ELASTIC_USER', '')
ELASTIC_PASSWORD = os.environ.get('ELASTIC_PASSWORD', '')
ELASTIC_PORT = os.environ.get('ELASTIC_PORT', 9200)
ELASTIC_AUTH = f'{ELASTIC_USER}:{ELASTIC_PASSWORD}' if ELASTIC_USER else ''
ELASTIC_URL = os.environ.get(
"ELASTIC_URL", f"https://{ELASTIC_AUTH}@{ELASTIC_HOST}:{ELASTIC_PORT}"
'ELASTIC_URL', f'https://{ELASTIC_AUTH}@{ELASTIC_HOST}:{ELASTIC_PORT}'
)
REDIS_TTL = 86400 # 1 day in seconds
index_settings = {
"settings": {
"index": {
"number_of_shards": 1,
"auto_expand_replicas": "0-all",
},
"analysis": {
"analyzer": {
"ru": {
"tokenizer": "standard",
"filter": ["lowercase", "ru_stop", "ru_stemmer"],
'settings': {
'index': {'number_of_shards': 1, 'auto_expand_replicas': '0-all'},
'analysis': {
'analyzer': {
'ru': {
'tokenizer': 'standard',
'filter': ['lowercase', 'ru_stop', 'ru_stemmer'],
}
},
"filter": {
"ru_stemmer": {
"type": "stemmer",
"language": "russian",
},
"ru_stop": {
"type": "stop",
"stopwords": "_russian_",
},
'filter': {
'ru_stemmer': {'type': 'stemmer', 'language': 'russian'},
'ru_stop': {'type': 'stop', 'stopwords': '_russian_'},
},
},
},
"mappings": {
"properties": {
"body": {"type": "text", "analyzer": "ru"},
"title": {"type": "text", "analyzer": "ru"},
'mappings': {
'properties': {
'body': {'type': 'text', 'analyzer': 'ru'},
'title': {'type': 'text', 'analyzer': 'ru'},
# 'author': {'type': 'text'},
}
},
}
expected_mapping = index_settings["mappings"]
expected_mapping = index_settings['mappings']
class SearchService:
def __init__(self, index_name="search_index"):
def __init__(self, index_name='search_index'):
self.index_name = index_name
self.manager = Manager()
self.client = None
# Use the manager to create the Lock and Value
self.lock = self.manager.Lock()
self.initialized_flag = self.manager.Value("i", 0)
self.initialized_flag = self.manager.Value('i', 0)
# Only initialize the instance if it's not already initialized
if not self.initialized_flag.value and ELASTIC_HOST:
try:
self.client = OpenSearch(
hosts=[{"host": ELASTIC_HOST, "port": ELASTIC_PORT}],
hosts=[{'host': ELASTIC_HOST, 'port': ELASTIC_PORT}],
http_compress=True,
http_auth=(ELASTIC_USER, ELASTIC_PASSWORD),
use_ssl=True,
@@ -78,34 +69,34 @@ class SearchService:
ssl_show_warn=False,
# ca_certs = ca_certs_path
)
logger.info(" Клиент OpenSearch.org подключен")
logger.info(' Клиент OpenSearch.org подключен')
if self.lock.acquire(blocking=False):
try:
self.check_index()
finally:
self.lock.release()
else:
logger.debug(" проверка пропущена")
logger.debug(' проверка пропущена')
except Exception as exc:
logger.error(f" {exc}")
logger.error(f' {exc}')
self.client = None
def info(self):
if isinstance(self.client, OpenSearch):
logger.info(" Поиск подключен") # : {self.client.info()}')
logger.info(' Поиск подключен') # : {self.client.info()}')
else:
logger.info(" * Задайте переменные среды для подключения к серверу поиска")
logger.info(' * Задайте переменные среды для подключения к серверу поиска')
def delete_index(self):
if self.client:
logger.debug(f" Удаляем индекс {self.index_name}")
logger.debug(f' Удаляем индекс {self.index_name}')
self.client.indices.delete(index=self.index_name, ignore_unavailable=True)
def create_index(self):
if self.client:
if self.lock.acquire(blocking=False):
try:
logger.debug(f" Создаём новый индекс: {self.index_name} ")
logger.debug(f' Создаём новый индекс: {self.index_name} ')
self.client.indices.create(
index=self.index_name, body=index_settings
)
@@ -114,11 +105,11 @@ class SearchService:
finally:
self.lock.release()
else:
logger.debug(" ..")
logger.debug(' ..')
def put_mapping(self):
if self.client:
logger.debug(f" Разметка индекации {self.index_name}")
logger.debug(f' Разметка индекации {self.index_name}')
self.client.indices.put_mapping(
index=self.index_name, body=expected_mapping
)
@@ -142,36 +133,28 @@ class SearchService:
finally:
self.lock.release()
else:
logger.debug(" ..")
logger.debug(' ..')
def index(self, shout):
if self.client:
id_ = str(shout.id)
logger.debug(f" Индексируем пост {id_}")
logger.debug(f' Индексируем пост {id_}')
self.client.index(index=self.index_name, id=id_, body=shout.dict())
async def search(self, text, limit, offset):
logger.debug(f" Ищем: {text}")
search_body = {
"query": {"match": {"_all": text}},
}
logger.debug(f' Ищем: {text}')
search_body = {'query': {'match': {'_all': text}}}
if self.client:
search_response = self.client.search(
index=self.index_name, body=search_body, size=limit, from_=offset
)
hits = search_response["hits"]["hits"]
hits = search_response['hits']['hits']
results = [{**hit['_source'], 'score': hit['_score']} for hit in hits]
# Use Redis as cache with TTL
redis_key = f"search:{text}"
await redis.execute("SETEX", redis_key, REDIS_TTL, json.dumps(results))
redis_key = f'search:{text}'
await redis.execute('SETEX', redis_key, REDIS_TTL, json.dumps(results))
return []
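Search results are additionally cached in Redis under `search:<text>` with a one-day TTL, so they can be read back without re-querying OpenSearch; a sketch (key shape taken from the code above, values hypothetical):

```python
# Run a search, then read the cached JSON copy back from Redis.
service = SearchService()
await service.search('культура', limit=10, offset=0)
cached = await redis.execute('GET', 'search:культура')
results = json.loads(cached) if cached else []
```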

View File

@@ -26,5 +26,5 @@ def start_sentry():
],
)
except Exception as e:
print("[services.sentry] init error")
print('[services.sentry] init error')
print(e)

View File

@@ -4,7 +4,7 @@ from services.rediscache import redis
async def get_unread_counter(chat_id: str, author_id: int) -> int:
r = await redis.execute("LLEN", f"chats/{chat_id}/unread/{author_id}")
r = await redis.execute('LLEN', f'chats/{chat_id}/unread/{author_id}')
if isinstance(r, str):
return int(r)
elif isinstance(r, int):
@@ -14,7 +14,7 @@ async def get_unread_counter(chat_id: str, author_id: int) -> int:
async def get_total_unread_counter(author_id: int) -> int:
chats_set = await redis.execute("SMEMBERS", f"chats_by_author/{author_id}")
chats_set = await redis.execute('SMEMBERS', f'chats_by_author/{author_id}')
s = 0
if isinstance(chats_set, str):
chats_set = json.loads(chats_set)

View File

@@ -20,9 +20,9 @@ from orm.topic import Topic
from services.db import local_session
from services.logger import root_logger as logger
GOOGLE_KEYFILE_PATH = os.environ.get("GOOGLE_KEYFILE_PATH", "/dump/google-service.json")
GOOGLE_PROPERTY_ID = os.environ.get("GOOGLE_PROPERTY_ID", "")
VIEWS_FILEPATH = "/dump/views.json"
GOOGLE_KEYFILE_PATH = os.environ.get('GOOGLE_KEYFILE_PATH', '/dump/google-service.json')
GOOGLE_PROPERTY_ID = os.environ.get('GOOGLE_PROPERTY_ID', '')
VIEWS_FILEPATH = '/dump/views.json'
class ViewedStorage:
@@ -42,12 +42,12 @@ class ViewedStorage:
"""Подключение к клиенту Google Analytics с использованием аутентификации"""
self = ViewedStorage
async with self.lock:
os.environ.setdefault("GOOGLE_APPLICATION_CREDENTIALS", GOOGLE_KEYFILE_PATH)
os.environ.setdefault('GOOGLE_APPLICATION_CREDENTIALS', GOOGLE_KEYFILE_PATH)
if GOOGLE_KEYFILE_PATH and os.path.isfile(GOOGLE_KEYFILE_PATH):
# Using a default constructor instructs the client to use the credentials
# specified in GOOGLE_APPLICATION_CREDENTIALS environment variable.
self.analytics_client = BetaAnalyticsDataClient()
logger.info(" * Клиент Google Analytics успешно авторизован")
logger.info(' * Клиент Google Analytics успешно авторизован')
# Load precounted views from the JSON file
self.load_precounted_views()
@@ -55,19 +55,19 @@ class ViewedStorage:
if os.path.exists(VIEWS_FILEPATH):
file_timestamp = os.path.getctime(VIEWS_FILEPATH)
self.start_date = datetime.fromtimestamp(file_timestamp).strftime(
"%Y-%m-%d"
'%Y-%m-%d'
)
now_date = datetime.now().strftime("%Y-%m-%d")
now_date = datetime.now().strftime('%Y-%m-%d')
if now_date == self.start_date:
logger.info(" * Данные актуализованы!")
logger.info(' * Данные актуализованы!')
else:
logger.info(f" * Миграция проводилась: {self.start_date}")
logger.info(f' * Миграция проводилась: {self.start_date}')
# Start the background task
asyncio.create_task(self.worker())
else:
logger.info(" * Пожалуйста, добавьте ключевой файл Google Analytics")
logger.info(' * Пожалуйста, добавьте ключевой файл Google Analytics')
self.disabled = True
@staticmethod
@@ -75,31 +75,31 @@ class ViewedStorage:
"""Загрузка предварительно подсчитанных просмотров из файла JSON"""
self = ViewedStorage
try:
with open(VIEWS_FILEPATH, "r") as file:
with open(VIEWS_FILEPATH, 'r') as file:
precounted_views = json.load(file)
self.views_by_shout.update(precounted_views)
logger.info(
f" * {len(precounted_views)} публикаций с просмотрами успешно загружены."
f' * {len(precounted_views)} публикаций с просмотрами успешно загружены.'
)
except Exception as e:
logger.error(f"Ошибка загрузки предварительно подсчитанных просмотров: {e}")
logger.error(f'Ошибка загрузки предварительно подсчитанных просмотров: {e}')
@staticmethod
async def update_pages():
"""Запрос всех страниц от Google Analytics, отсортированных по количеству просмотров"""
self = ViewedStorage
logger.info(" ⎧ Обновление данных просмотров от Google Analytics ---")
logger.info(' ⎧ Обновление данных просмотров от Google Analytics ---')
if not self.disabled:
try:
start = time.time()
async with self.lock:
if self.analytics_client:
request = RunReportRequest(
property=f"properties/{GOOGLE_PROPERTY_ID}",
dimensions=[Dimension(name="pagePath")],
metrics=[Metric(name="screenPageViews")],
property=f'properties/{GOOGLE_PROPERTY_ID}',
dimensions=[Dimension(name='pagePath')],
metrics=[Metric(name='screenPageViews')],
date_ranges=[
DateRange(start_date=self.start_date, end_date="today")
DateRange(start_date=self.start_date, end_date='today')
],
)
response = self.analytics_client.run_report(request)
@@ -113,7 +113,7 @@ class ViewedStorage:
# Extract page paths from the Google Analytics response
if isinstance(row.dimension_values, list):
page_path = row.dimension_values[0].value
slug = page_path.split("discours.io/")[-1]
slug = page_path.split('discours.io/')[-1]
views_count = int(row.metric_values[0].value)
# Update the data in storage
@@ -126,10 +126,10 @@ class ViewedStorage:
# Record page paths for logging
slugs.add(slug)
logger.info(f" ⎪ Собрано страниц: {len(slugs)} ")
logger.info(f' ⎪ Собрано страниц: {len(slugs)} ')
end = time.time()
logger.info(" ⎪ Обновление страниц заняло %fs " % (end - start))
logger.info(' ⎪ Обновление страниц заняло %fs ' % (end - start))
except Exception as error:
logger.error(error)
@@ -209,18 +209,18 @@ class ViewedStorage:
failed = 0
except Exception as _exc:
failed += 1
logger.info(" - Обновление не удалось #%d, ожидание 10 секунд" % failed)
logger.info(' - Обновление не удалось #%d, ожидание 10 секунд' % failed)
if failed > 3:
logger.info(" - Больше не пытаемся обновить")
logger.info(' - Больше не пытаемся обновить')
break
if failed == 0:
when = datetime.now(timezone.utc) + timedelta(seconds=self.period)
t = format(when.astimezone().isoformat())
logger.info(
" ⎩ Следующее обновление: %s"
% (t.split("T")[0] + " " + t.split("T")[1].split(".")[0])
' ⎩ Следующее обновление: %s'
% (t.split('T')[0] + ' ' + t.split('T')[1].split('.')[0])
)
await asyncio.sleep(self.period)
else:
await asyncio.sleep(10)
logger.info(" - Попытка снова обновить данные")
logger.info(' - Попытка снова обновить данные')
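Consumers read the collected counters through the storage's accessors, as the cache-trigger code earlier in this commit does; a minimal sketch with hypothetical slugs:

```python
# Async accessors return accumulated view counts by slug.
topic_views = await ViewedStorage.get_topic('some-topic-slug')
author_views = await ViewedStorage.get_author('some-author-slug')
```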

View File

@@ -15,13 +15,13 @@ class WebhookEndpoint(HTTPEndpoint):
try:
data = await request.json()
if data:
auth = request.headers.get("Authorization")
auth = request.headers.get('Authorization')
if auth:
if auth == os.environ.get("WEBHOOK_SECRET"):
user_id: str = data["user"]["id"]
name: str = data["user"]["given_name"]
slug: str = data["user"]["email"].split("@")[0]
slug: str = re.sub("[^0-9a-z]+", "-", slug.lower())
if auth == os.environ.get('WEBHOOK_SECRET'):
user_id: str = data['user']['id']
name: str = data['user']['given_name']
slug: str = data['user']['email'].split('@')[0]
slug: str = re.sub('[^0-9a-z]+', '-', slug.lower())
with local_session() as session:
author = (
session.query(Author)
@@ -29,12 +29,12 @@ class WebhookEndpoint(HTTPEndpoint):
.first()
)
if author:
slug = slug + "-" + user_id.split("-").pop()
slug = slug + '-' + user_id.split('-').pop()
await create_author(user_id, slug, name)
return JSONResponse({"status": "success"})
return JSONResponse({'status': 'success'})
except Exception as e:
import traceback
traceback.print_exc()
return JSONResponse({"status": "error", "message": str(e)}, status_code=500)
return JSONResponse({'status': 'error', 'message': str(e)}, status_code=500)
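The handler expects an authorizer-style JSON body shaped roughly as below; field names are taken from the reads above, values are hypothetical:

```python
# Hypothetical webhook payload accepted by WebhookEndpoint.
payload = {
    'user': {
        'id': '1f6b7c2a-0000-0000-0000-000000000000',
        'given_name': 'Anna',
        'email': 'anna@example.com',
    }
}
# The slug comes from the email local part, lowercased and sanitized with
# re.sub('[^0-9a-z]+', '-', ...); on collision the last id segment is appended.
```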