welcomecenterbot/handlers/messages_routing.py

import logging
import math

from store import redis, get_average_toxic
from bot.api import telegram_api
from bot.config import FEEDBACK_CHAT_ID
from nlp.toxicity_detector import detector
from handlers.handle_private import handle_private

logger = logging.getLogger('handlers.messages_routing')
logging.basicConfig(level=logging.DEBUG)

async def messages_routing(msg, state):
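    """Route an incoming Telegram message by chat type.

    Private chats go to handle_private, replies in the feedback group
    are relayed back to their origin chat, and text messages in other
    chats are scored for toxicity and moderated.
    """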
    cid = msg["chat"]["id"]
    uid = msg["from"]["id"]
    text = msg.get("text")
    reply_msg = msg.get("reply_to_message")
    if cid == uid:
        # private chat with the bot
        logger.info('private chat message: %s', msg)
        await handle_private(msg, state)
    elif str(cid) == FEEDBACK_CHAT_ID:
        # messages from the feedback group
        logger.info('feedback chat message: %s', msg)
        logger.debug(msg)
        if reply_msg:
            # relay the reply text back to the origin chat of the replied-to message
            reply_chat_id = reply_msg.get("chat", {}).get("id")
            if reply_chat_id != FEEDBACK_CHAT_ID:
                await telegram_api(
                    "sendMessage",
                    chat_id=reply_chat_id,
                    text=text,
                    reply_to_message_id=reply_msg.get("message_id")
                )
    elif bool(text):
        mid = msg.get("message_id")
        if text == '/toxic@welcomecenter_bot':
            # /toxic command: report the toxicity of a selected message
            latest_toxic_message_id = await redis.get(f"toxic:{cid}")
            toxic_score = await get_average_toxic(msg)
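            # pick the message to score: an explicit reply wins; otherwise fall
            # back to the latest tracked message id stored under toxic:{cid}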
            scoring_msg_id = reply_msg.get("message_id") if reply_msg else None
            if not scoring_msg_id and latest_toxic_message_id:
                scoring_msg_id = int(latest_toxic_message_id)
            if scoring_msg_id:
                msg_toxic_key = f"toxic:{cid}:{uid}:{scoring_msg_id}"
                logger.debug('msg_toxic_key: %s', msg_toxic_key)
                one_score = await redis.get(msg_toxic_key)
                if one_score:
                    msg_score = int(one_score)
                    logger.debug('one_score: %s', msg_score)
                    # map the average score to a reaction emoji
                    emoji = (
                        '😳' if toxic_score > 90
                        else '😟' if toxic_score > 80
                        else '😏' if toxic_score > 60
                        else '🙂' if toxic_score > 20
                        else '😇'
                    )
                    # user-facing reply (Russian): "<N>% toxicity\nAverage message toxicity: <M>% <emoji>"
                    text = f"{msg_score}% токсичности\nСредняя токсичность сообщений: {toxic_score}% {emoji}"
                    await telegram_api(
                        "sendMessage",
                        chat_id=cid,
                        reply_to_message_id=scoring_msg_id,
                        text=text
                    )
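            # remove the /toxic command message itself to keep the chat clean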
            await telegram_api(
                "deleteMessage",
                chat_id=cid,
                message_id=mid
            )
        else:
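            # ordinary text message: score it and store the result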
            toxic_score = detector(text)
            toxic_perc = math.floor(toxic_score * 100)
            # track the latest scored message and keep the per-message score for 3 days
            await redis.set(f"toxic:{cid}", mid)
            await redis.set(f"toxic:{cid}:{uid}:{mid}", toxic_perc, ex=60 * 60 * 24 * 3)
            logger.info(f'\ntext: {text}\ntoxic: {toxic_perc}%')
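            # moderation thresholds (hard-coded here): score > 0.90 deletes the
            # message (a copy is kept under removed:{uid}:{cid}:{mid}),
            # 0.81 < score <= 0.90 only reacts to it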
            if toxic_score > 0.81:
                if toxic_score > 0.90:
                    await redis.set(f"removed:{uid}:{cid}:{mid}", text)
                    await telegram_api(
                        "deleteMessage",
                        chat_id=cid,
                        message_id=mid
                    )
                else:
                    await telegram_api(
                        "setMessageReaction",
                        chat_id=cid,
                        is_big=True,
                        message_id=mid,
                        reaction='[{"type":"emoji", "emoji":"🙉"}]'
                    )
    else:
        # nothing to do for non-text messages in other chats
        pass
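
# --- Usage sketch (not part of the original module) ---
# A minimal long-polling loop showing how messages_routing might be driven.
# This is an illustrative assumption: the real bot wires the router up
# elsewhere, the shape of `state` is not defined in this file, and it is
# assumed that telegram_api returns the parsed JSON response of the
# standard Telegram Bot API "getUpdates" method.
#
# import asyncio
#
# async def main():
#     offset = 0
#     state = {}
#     while True:
#         updates = await telegram_api("getUpdates", offset=offset, timeout=30)
#         for update in updates.get("result", []):
#             offset = update["update_id"] + 1
#             message = update.get("message")
#             if message:
#                 await messages_routing(message, state)
#
# if __name__ == "__main__":
#     asyncio.run(main())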