welcomecenterbot/handlers/messages_routing.py
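"""Message routing for @welcomecenter_bot.

Dispatches each incoming update to one of three paths: private chats go to
handle_private, replies inside the feedback group are relayed back to the
chat the quoted message came from, and text messages in other group chats
are scored for toxicity (with a /toxic command to query stored scores).
"""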
import logging
import math

from store import redis
from bot.api import telegram_api
from bot.config import FEEDBACK_CHAT_ID
from nlp.toxicity_detector import detector
from handlers.handle_private import handle_private

logger = logging.getLogger('handlers.messages_routing')
logging.basicConfig(level=logging.DEBUG)
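# Redis keys used by this module (as written and read below):
#   toxic:{cid}               -> message_id of the most recently scored message in chat {cid}
#   toxic:{cid}:{uid}:{mid}   -> toxicity percentage (0-100) of one message, expires after 3 days
#   removed:{uid}:{cid}:{mid} -> original text of an auto-deleted toxic message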
async def messages_routing(msg, state):
    cid = msg["chat"]["id"]
    uid = msg["from"]["id"]
    text = msg.get("text")
    reply_msg = msg.get("reply_to_message")

    if cid == uid:
        # private chat with the bot
        logger.info("private chat message")
        await handle_private(msg, state)
    elif str(cid) == FEEDBACK_CHAT_ID:
        # messages from the feedback group: if the quoted message originated
        # in another chat, relay the reply text there
        logger.info("feedback chat message")
        logger.debug(msg)
        if reply_msg:
            reply_chat_id = reply_msg.get("chat", {}).get("id")
            if reply_chat_id != FEEDBACK_CHAT_ID:
                await telegram_api(
                    "sendMessage",
                    chat_id=reply_chat_id,
                    text=text,
                    reply_to_message_id=reply_msg.get("message_id"),
                )
    elif text:
        mid = msg.get("message_id")
        if text == '/toxic@welcomecenter_bot':
            text = ''
            toxic_score = 0
            if not reply_msg:
                # /toxic with no reply: average the caller's stored scores
                logger.debug(f'scoring average for {uid}')
                scoring_msg_id = mid
                pattern = f"toxic:{cid}:{uid}:*"
                scores = []
                async for key in redis.scan_iter(pattern):
                    scr = int(await redis.get(key))
                    scores.append(scr)
                toxic_score = math.floor(sum(scores) / len(scores)) if scores else 0
                # "Average message toxicity: N%"
                text = f"Средняя токсичность сообщений: {toxic_score}%"
            else:
                # /toxic as a reply: report the stored score of the quoted
                # message, falling back to the latest scored message in chat
                latest_toxic_message_id = await redis.get(f"toxic:{cid}")
                scoring_msg_id = reply_msg.get("message_id") or latest_toxic_message_id
                toxic_score = await redis.get(f"toxic:{cid}:{uid}:{scoring_msg_id}")
                if toxic_score:
                    # "N% toxicity"
                    text = f"{int(toxic_score)}% токсичности"
            if text:
                await telegram_api(
                    "sendMessage",
                    chat_id=cid,
                    reply_to_message_id=scoring_msg_id,
                    text=text,
                )
            # remove the /toxic command message itself
            await telegram_api(
                "deleteMessage",
                chat_id=cid,
                message_id=mid,
            )
        else:
            # any other text in a group chat: score it and remember the result
            toxic_score = detector(text)  # float in [0, 1], judging by the thresholds below
            toxic_perc = math.floor(toxic_score * 100)
            await redis.set(f"toxic:{cid}", mid)
            await redis.set(f"toxic:{cid}:{uid}:{mid}", toxic_perc, ex=60 * 60 * 24 * 3)
            logger.info(f'\ntext: {text}\ntoxic: {toxic_perc}%')
            if toxic_score > 0.81:
                if toxic_score > 0.90:
                    # very toxic: keep a copy of the text, then delete the message
                    await redis.set(f"removed:{uid}:{cid}:{mid}", text)
                    await telegram_api(
                        "deleteMessage",
                        chat_id=cid,
                        message_id=mid,
                    )
                else:
                    # borderline toxic: react instead of deleting
                    await telegram_api(
                        "setMessageReaction",
                        chat_id=cid,
                        is_big=True,
                        message_id=mid,
                        reaction='[{"type":"emoji", "emoji":"🙉"}]',
                    )
    else:
        # non-text messages are ignored
        pass
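

# ---------------------------------------------------------------------------
# Usage sketch (hypothetical wiring, not confirmed by this file): a minimal
# long-polling loop that feeds Bot API updates into messages_routing. It
# assumes telegram_api returns the decoded JSON of the Bot API response and
# that `state` is a plain dict shared across calls for handle_private; the
# real deployment may use webhooks instead.
# ---------------------------------------------------------------------------
import asyncio


async def poll_updates():
    state = {}
    offset = 0
    while True:
        updates = await telegram_api("getUpdates", offset=offset, timeout=30)
        for update in updates.get("result", []):
            offset = update["update_id"] + 1
            msg = update.get("message")
            if msg:
                await messages_routing(msg, state)


if __name__ == "__main__":
    asyncio.run(poll_updates())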