import logging
import math

from store import redis
from bot.api import telegram_api
from bot.config import FEEDBACK_CHAT_ID
from nlp.toxicity_detector import detector
from handlers.handle_private import handle_private

logger = logging.getLogger("handlers.messages_routing")
logging.basicConfig(level=logging.DEBUG)


async def messages_routing(msg, state):
    cid = msg["chat"]["id"]
    uid = msg["from"]["id"]
    text = msg.get("text")
    reply_msg = msg.get("reply_to_message")

    if cid == uid:
        # Private chat with the bot
        logger.info("private chat message: %s", msg)
        await handle_private(msg, state)

    elif str(cid) == FEEDBACK_CHAT_ID:
        # Message inside the feedback group: if it replies to a message
        # forwarded from another chat, relay the reply back to that chat.
        logger.info("feedback chat message")
        logger.debug(msg)
        if reply_msg:
            reply_chat_id = reply_msg.get("chat", {}).get("id")
            if str(reply_chat_id) != FEEDBACK_CHAT_ID:
                await telegram_api(
                    "sendMessage",
                    chat_id=reply_chat_id,
                    text=text,
                    reply_to_message_id=reply_msg.get("message_id"),
                )

    elif text:
        mid = msg.get("message_id")

        if text == "/toxic@welcomecenter_bot":
            # Toxicity report: score of the replied-to message (or the latest
            # scored message in this chat) plus the user's average.
            latest_toxic_message_id = await redis.get(f"toxic:{cid}")

            pattern = f"toxic:{cid}:{uid}:*"
            scores = []
            # store.redis.scan_iter is expected to return the list of matching keys here
            found = await redis.scan_iter(pattern)
            logger.debug(f"found {len(found)} messages")
            for key in found:
                scr = await redis.get(key)
                if scr is not None:
                    scores.append(int(scr))
            toxic_score = math.floor(sum(scores) / len(scores)) if scores else 0

            scoring_msg_id = reply_msg.get("message_id") if reply_msg else None
            if scoring_msg_id is None and latest_toxic_message_id:
                scoring_msg_id = int(latest_toxic_message_id)

            msg_toxic_key = f"toxic:{cid}:{uid}:{scoring_msg_id}"
            logger.debug(msg_toxic_key)
            one_score = await redis.get(msg_toxic_key)
            logger.debug(one_score)

            emoji = (
                "😳" if toxic_score > 90
                else "😟" if toxic_score > 80
                else "😏" if toxic_score > 60
                else "🙂" if toxic_score > 20
                else "😇"
            )
            # User-facing report text (kept in Russian)
            text = (
                f"{int(one_score or 0)}% токсичности\n"
                f"Средняя токсичность сообщений: {toxic_score}% {emoji}"
            )

            # Remove the /toxic command itself, then post the report as a
            # reply to the scored message (if there is one to reply to).
            await telegram_api("deleteMessage", chat_id=cid, message_id=mid)
            if scoring_msg_id:
                await telegram_api(
                    "sendMessage",
                    chat_id=cid,
                    reply_to_message_id=scoring_msg_id,
                    text=text,
                )
        else:
            # Score the message and keep the per-message score for three days.
            toxic_score = detector(text)
            toxic_perc = math.floor(toxic_score * 100)
            await redis.set(f"toxic:{cid}", mid)
            await redis.set(f"toxic:{cid}:{uid}:{mid}", toxic_perc, ex=60 * 60 * 24 * 3)
            logger.info(f"\ntext: {text}\ntoxic: {toxic_perc}%")

            if toxic_score > 0.81:
                if toxic_score > 0.90:
                    # Very toxic: keep a copy of the text, then delete the message.
                    await redis.set(f"removed:{uid}:{cid}:{mid}", text)
                    await telegram_api("deleteMessage", chat_id=cid, message_id=mid)
                else:
                    # Borderline: mark the message with a reaction instead of deleting it.
                    await telegram_api(
                        "setMessageReaction",
                        chat_id=cid,
                        is_big=True,
                        message_id=mid,
                        reaction='[{"type":"emoji", "emoji":"🙉"}]',
                    )
    else:
        # Non-text messages outside the handled chats are ignored.
        pass
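

# --- Illustrative wiring (a sketch, not part of the original handler) -------
# A minimal long-polling loop showing how messages_routing might be driven.
# "getUpdates", "offset" and "timeout" are standard Telegram Bot API
# parameters, but the return shape assumed for the project-local telegram_api
# wrapper (a dict with a "result" list) and the contents of `state` are
# assumptions here.
if __name__ == "__main__":
    import asyncio

    async def _poll():
        state = {}  # assumed: mutable per-run state passed through to handlers
        offset = 0
        while True:
            updates = await telegram_api("getUpdates", offset=offset, timeout=30)
            for update in (updates or {}).get("result", []):
                offset = update["update_id"] + 1
                message = update.get("message")
                if message:
                    await messages_routing(message, state)

    asyncio.run(_poll())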