import logging
import math

from state.redis import redis
from state.scan import get_average_pattern

from bot.api import telegram_api
from bot.config import FEEDBACK_CHAT_ID

from handlers.handle_private import handle_private
from nlp.toxicity_detector import detector

# from nlp.normalize import normalize

logger = logging.getLogger("handlers.messages_routing")
def _toxicity_emoji(score):
    """Map an average toxicity percentage (0-100) to a mood emoji."""
    if score > 90:
        return "😳"
    if score > 80:
        return "😟"
    if score > 60:
        return "😏"
    if score > 20:
        return "🙂"
    return "😇"


async def _delete_message(cid, mid):
    """Best-effort delete of message `mid` in chat `cid`.

    The bot may lack admin rights or the message may already be gone, so
    failures are logged (previously silently swallowed) and ignored.
    """
    try:
        await telegram_api("deleteMessage", chat_id=cid, message_id=mid)
    except Exception:
        logger.warning("could not delete message %s in chat %s", mid, cid)


async def _handle_toxic_command(cid, uid, mid, reply_msg):
    """Answer the /toxic command with toxicity stats, then delete the command.

    Replies with the score of the referenced message (explicit reply target,
    falling back to the latest scored message in the chat) plus the author's
    average toxicity across all stored scores.
    """
    # Most recently scored message in this chat (fallback reply target).
    latest_toxic_message_id = await redis.get(f"toxic:{cid}")

    # Pick the message to reply to: explicit reply > the command itself >
    # the latest scored message in the chat.
    reply_to_msg_id = mid
    if reply_msg:
        reply_to_msg_id = reply_msg.get("message_id")
    if not reply_to_msg_id and latest_toxic_message_id:
        reply_to_msg_id = int(latest_toxic_message_id)

    # Average toxicity over this user's stored message scores. Initialized
    # up front so a Redis/scan failure cannot leave it unbound (the original
    # code raised NameError at `if toxic_score:` in that case).
    toxic_score = None
    try:
        toxic_score = await get_average_pattern(f"toxic:{cid}:{uid}:*")
    except Exception:
        logger.exception("failed to compute average toxicity for user %s", uid)

    if reply_to_msg_id:
        one_score = await redis.get(f"toxic:{cid}:{uid}:{reply_to_msg_id}")
        reply_text = ""
        if one_score:
            logger.debug(one_score)
            reply_text += f"{int(one_score)}% токсичности\n"
        if toxic_score:
            emoji = _toxicity_emoji(toxic_score)
            reply_text += f"Средняя токсичность сообщений: {toxic_score}% {emoji}"
        if reply_text:
            await telegram_api(
                "sendMessage",
                chat_id=cid,
                reply_to_message_id=reply_to_msg_id,
                text=reply_text,
            )
    # Remove the command message itself to keep the chat clean.
    await _delete_message(cid, mid)


async def _score_and_moderate(cid, uid, mid, text):
    """Score a regular group message, persist the score, and moderate.

    Very toxic messages (>0.90) are archived to Redis and deleted; borderline
    ones (>0.81) are marked with a reaction instead.
    """
    toxic_score = detector(text)  # 0.0 .. 1.0
    toxic_perc = math.floor(toxic_score * 100)

    # Remember the latest scored message per chat, plus the per-message score
    # (per-message scores expire after three days).
    await redis.set(f"toxic:{cid}", mid)
    await redis.set(f"toxic:{cid}:{uid}:{mid}", toxic_perc, ex=60 * 60 * 24 * 3)
    logger.info("\ntext: %s\ntoxic: %s%%", text, toxic_perc)

    if toxic_score > 0.90:
        # Keep a copy so /removed can reference it, then delete.
        await redis.set(f"removed:{uid}:{cid}:{mid}", text)
        await _delete_message(cid, mid)
    elif toxic_score > 0.81:
        # Borderline: flag with a reaction instead of deleting.
        await telegram_api(
            "setMessageReaction",
            chat_id=cid,
            is_big=True,
            message_id=mid,
            reaction='[{"type":"emoji", "emoji":"🙉"}]',
        )


async def messages_routing(msg, state):
    """Top-level router for incoming Telegram messages.

    Dispatches by chat kind:
      * private chat with the bot -> handle_private
      * the feedback group        -> relay admin replies to the source chat
      * any other group           -> /toxic and /removed commands, or
                                     toxicity scoring and moderation

    Args:
        msg: Telegram message object (dict) from the update.
        state: shared bot state, passed through to handle_private.
    """
    cid = msg["chat"]["id"]
    uid = msg["from"]["id"]
    # Fall back to the media caption when there is no plain text.
    text = msg.get("text", msg.get("caption"))
    reply_msg = msg.get("reply_to_message")

    if cid == uid:
        # Private chat with the bot.
        # Fix: original passed `msg` as a %-arg with no placeholder, which
        # triggers a logging formatting error instead of logging the message.
        logger.info("private chat message: %s", msg)
        await handle_private(msg, state)

    elif str(cid) == FEEDBACK_CHAT_ID:
        # Feedback group: relay an admin's reply back to the original chat.
        logger.info("feedback chat message: %s", msg)
        logger.debug(msg)
        if reply_msg:
            reply_chat_id = reply_msg.get("chat", {}).get("id")
            # NOTE(review): reply_chat_id is an int while FEEDBACK_CHAT_ID is
            # a string, so this guard is always true. Behavior kept as-is —
            # confirm the intended semantics before tightening the comparison.
            if reply_chat_id != FEEDBACK_CHAT_ID:
                await telegram_api(
                    "sendMessage",
                    chat_id=reply_chat_id,
                    text=text,
                    reply_to_message_id=reply_msg.get("message_id"),
                )

    elif text:
        mid = msg.get("message_id")
        if text == "/toxic@welcomecenter_bot":
            await _handle_toxic_command(cid, uid, mid, reply_msg)
        elif text == "/removed@welcomecenter_bot":
            await _delete_message(cid, mid)
        else:
            await _score_and_moderate(cid, uid, mid, text)

    else:
        # Media without text or caption — nothing to route.
        pass
|