toxicity-detector

2024-02-12 15:50:35 +03:00
parent ec82174dc9
commit 7fd358931a
10 changed files with 166 additions and 255 deletions


@@ -1,6 +1,7 @@
from bot.api import telegram_api
from bot.config import FEEDBACK_CHAT_ID
from nlp.toxicity import text2toxicity
from nlp.replying import get_toxic_reply
import logging
from handlers.handle_private import handle_private
@@ -28,5 +29,17 @@ async def messages_routing(msg, state):
        if reply_chat_id != FEEDBACK_CHAT_ID:
            await telegram_api("sendMessage", chat_id=reply_chat_id, text=text, reply_to=reply_msg.get("message_id"))
    elif bool(text):
        toxic_score = text2toxicity(text)
        if toxic_score > 0.71:
            toxic_reply = get_toxic_reply(toxic_score)
            await telegram_api(
                "setMessageReaction",
                chat_id=cid,
                is_big=True,
                message_id=msg.get("message_id"),
                reaction=f'[{{"type":"emoji", "emoji":"{toxic_reply}"}}]'
            )
    else:
        pass
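
The diff only adds the calls; the nlp.toxicity and nlp.replying modules themselves are not part of this hunk. Below is a minimal sketch of what they could look like, assuming text2toxicity returns a toxicity probability in [0, 1] from a Hugging Face text-classification model and get_toxic_reply maps the score to one of Telegram's allowed reaction emoji. The model name (s-nlp/russian_toxicity_classifier), the 0.8/0.9 cut-offs and the emoji choices are illustrative assumptions, not the project's actual implementation.

# Hypothetical sketch of nlp/toxicity.py and nlp/replying.py; not the repository's code.
from transformers import pipeline

# Assumed model: any classifier that yields a toxicity probability in [0, 1] would fit here.
_classifier = pipeline("text-classification", model="s-nlp/russian_toxicity_classifier")


def text2toxicity(text: str) -> float:
    """Return a toxicity probability in [0, 1] for a message."""
    result = _classifier(text[:512])[0]  # truncate long messages for the model
    score = float(result["score"])
    # The pipeline reports the score of the winning label, so invert it when the label is neutral.
    return score if result["label"].lower().startswith("tox") else 1.0 - score


def get_toxic_reply(score: float) -> str:
    """Map a toxicity score above the bot's 0.71 threshold to a reaction emoji.

    The emoji must come from Telegram's allowed reaction set.
    """
    if score > 0.9:
        return "🤬"
    if score > 0.8:
        return "🤮"
    return "🙈"

Note that setMessageReaction in the Telegram Bot API expects the reaction parameter as a JSON-serialized array of ReactionType objects, which is why the handler builds the '[{"type":"emoji", "emoji":"..."}]' string by hand with an f-string (the doubled braces escape the literal JSON braces).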