From 0a21993c5b22aa82a2bd74b96e008091605580a4 Mon Sep 17 00:00:00 2001
From: Untone
Date: Thu, 26 Sep 2024 20:17:42 +0300
Subject: [PATCH] toxic-debug

---
 handlers/messages_routing.py | 33 +++++++++++++++++++--------------
 1 file changed, 19 insertions(+), 14 deletions(-)

diff --git a/handlers/messages_routing.py b/handlers/messages_routing.py
index bcd05d5..4446e08 100644
--- a/handlers/messages_routing.py
+++ b/handlers/messages_routing.py
@@ -35,31 +35,36 @@ async def messages_routing(msg, state):
         latest_toxic_message_id = await redis.get(f"toxic:{cid}")
         pattern = f"toxic:{cid}:{uid}:*"
         scores = []
+        scoring_msg_id = 0
         async for key in redis.scan_iter(pattern):
             scr = await redis.get(key)
             if isinstance(scr, int):
                 scores.append(scr)
         logger.debug(f'found {len(scores)} messages')
         toxic_score = math.floor(sum(scores)/len(scores)) if scores else 0
-        scoring_msg_id = reply_msg.get("message_id", int(latest_toxic_message_id)) if reply_msg else int(latest_toxic_message_id)
-        msg_toxic_key = f"toxic:{cid}:{uid}:{scoring_msg_id}"
-        logger.debug('msg_toxic_key: ', msg_toxic_key)
-        one_score = await redis.get(msg_toxic_key)
-        logger.debug('one_score: ', one_score)
-        emoji = '😳' if toxic_score > 90 else '😟' if toxic_score > 80 else '😏' if toxic_score > 60 else '🙂' if toxic_score > 20 else '😇'
-        text = f"{int(one_score or 0)}% токсичности\nСредняя токсичность сообщений: {toxic_score}% {emoji}"
+        if reply_msg:
+            scoring_msg_id = reply_msg.get("message_id")
+        if not scoring_msg_id and latest_toxic_message_id:
+            scoring_msg_id = int(latest_toxic_message_id)
+        if scoring_msg_id:
+            msg_toxic_key = f"toxic:{cid}:{uid}:{scoring_msg_id}"
+            logger.debug('msg_toxic_key: ', msg_toxic_key)
+            one_score = await redis.get(msg_toxic_key)
+            logger.debug('one_score: ', one_score)
+            emoji = '😳' if toxic_score > 90 else '😟' if toxic_score > 80 else '😏' if toxic_score > 60 else '🙂' if toxic_score > 20 else '😇'
+            text = f"{int(one_score or 0)}% токсичности\nСредняя токсичность сообщений: {toxic_score}% {emoji}"
+        if text:
+            await telegram_api(
+                "sendMessage",
+                chat_id=cid,
+                reply_to_message_id=scoring_msg_id,
+                text=text
+            )
         await telegram_api(
             "deleteMessage",
             chat_id=cid,
             message_id=mid
         )
-        if text:
-            await telegram_api(
-                "sendMessage",
-                chat_id=cid,
-                reply_to_message_id=scoring_msg_id,
-                text=text
-            )
     else:
         toxic_score = detector(text)
         toxic_perc = math.floor(toxic_score*100)