welcomecenterbot/handlers/messages_routing.py

49 lines
1.6 KiB
Python
Raw Normal View History

2024-06-03 10:27:42 +00:00
import logging
import math
2024-01-07 09:19:46 +00:00
from bot.api import telegram_api
from bot.config import FEEDBACK_CHAT_ID
2024-02-12 12:50:35 +00:00
from nlp.toxicity import text2toxicity
from nlp.replying import get_toxic_reply
2024-01-07 09:19:46 +00:00
from handlers.handle_private import handle_private
2024-06-03 10:27:42 +00:00
# Module-level logger; name mirrors this module's import path.
logger = logging.getLogger('handlers.messages_routing')
# NOTE(review): basicConfig() runs at import time and configures the ROOT
# logger as a side effect of importing this handler module — consider moving
# this call to the application entry point; verify nothing depends on it here.
logging.basicConfig(level=logging.DEBUG)
2024-01-07 09:19:46 +00:00
async def messages_routing(msg, state):
    """Route an incoming Telegram message to the appropriate handler.

    Dispatch rules, in order:
      1. Private chat (chat id == sender id): delegate to ``handle_private``.
      2. The feedback group chat: when someone replies to a message that
         originated in another chat, relay the reply text back to that chat.
      3. Any other chat with a text payload: score toxicity and, above the
         threshold, set an emoji reaction on the offending message.

    :param msg: Telegram ``Message`` object as a dict, straight from the API.
    :param state: opaque conversation state, passed through to ``handle_private``.
    """
    cid = msg["chat"]["id"]
    uid = msg["from"]["id"]
    text = msg.get("text")

    if cid == uid:
        # Direct messages with the bot.
        logger.info("private chat message")
        await handle_private(msg, state)

    elif str(cid) == FEEDBACK_CHAT_ID:
        # Messages in the feedback group.
        logger.info("feedback chat message")
        logger.debug(msg)
        reply_msg = msg.get("reply_to_message")
        if reply_msg:
            reply_chat_id = reply_msg.get("chat", {}).get("id")
            # Relay only when the replied-to message originated outside the
            # feedback chat. The ``text`` guard prevents calling sendMessage
            # with text=None when the reply is a sticker/photo (the original
            # code would hit a Bot API error in that case).
            # NOTE(review): the official Bot API parameter is
            # "reply_to_message_id"; assuming the telegram_api wrapper maps
            # "reply_to" accordingly — verify against bot/api.py.
            if reply_chat_id != FEEDBACK_CHAT_ID and text:
                await telegram_api("sendMessage", chat_id=reply_chat_id, text=text, reply_to=reply_msg.get("message_id"))

    elif text:
        # Any other (group) chat with a text payload: run toxicity scoring.
        toxic_score = text2toxicity(text)
        logger.info(f'\ntext: {text}\ntoxic: {math.floor(toxic_score*100)}%')

        # Empirically chosen cut-off; text2toxicity presumably returns a
        # score in [0, 1] — TODO confirm against nlp/toxicity.py.
        toxicity_threshold = 0.71
        if toxic_score > toxicity_threshold:
            toxic_reply = get_toxic_reply(toxic_score)
            # setMessageReaction expects a JSON-serialized array of
            # ReactionType objects; toxic_reply is a single emoji string
            # from our own reply table, so the f-string stays well-formed.
            await telegram_api(
                "setMessageReaction",
                chat_id=cid,
                is_big=True,
                message_id=msg.get("message_id"),
                reaction=f'[{{"type":"emoji", "emoji":"{toxic_reply}"}}]'
            )