refresh
parent aa8f9f8adb
commit 1c8bc26c64
bot/api.py
@@ -5,7 +5,7 @@ from bot.config import BOT_TOKEN
 import logging
 
 # Create a logger instance
-logger = logging.getLogger('[tgbot.api] ')
+logger = logging.getLogger('bot.api')
 logging.basicConfig(level=logging.DEBUG)
 
 api_base = f"https://api.telegram.org/bot{BOT_TOKEN}/"
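A side note on the rename above (context only, not part of the diff): dotted logger names such as 'bot.api' participate in Python's logger hierarchy, so a level or handler set on the parent 'bot' logger also governs 'bot.api', which the old bracketed name '[tgbot.api] ' did not allow. A minimal, self-contained sketch:

import logging

logging.basicConfig(level=logging.DEBUG)

# Hierarchy demo: configuring the parent "bot" logger affects "bot.api" too.
logging.getLogger('bot').setLevel(logging.WARNING)

logger = logging.getLogger('bot.api')
logger.debug("suppressed: effective level is WARNING, inherited from 'bot'")
logger.warning("emitted: WARNING and above still pass through")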
handlers/messages_routing.py
@@ -1,13 +1,14 @@
+import logging
+import math
+
 from bot.api import telegram_api
 from bot.config import FEEDBACK_CHAT_ID
 from nlp.toxicity import text2toxicity
 from nlp.replying import get_toxic_reply
-import logging
-import math
-
 from handlers.handle_private import handle_private
-logger = logging.getLogger(__name__)
-logging.basicConfig(level=logging.INFO)
+
+logger = logging.getLogger('handlers.messages_routing')
+logging.basicConfig(level=logging.DEBUG)
 
 
 async def messages_routing(msg, state):
main.py
@@ -7,17 +7,18 @@ from handlers.handle_join_request import handle_join_request, handle_reaction_on
 from handlers.messages_routing import messages_routing
 
 logging.basicConfig(level=logging.DEBUG)
-logger = logging.getLogger('[main] ')
+logger = logging.getLogger('main')
 state = dict()
 
 
 async def main():
     logger.info("\tstarted")
     async with ClientSession() as session:
         offset = 0  # initial offset value
         while True:
-            reponse = await telegram_api("getUpdates", offset=offset, allowed_updates=['message', 'message_reaction'])
-            if isinstance(reponse, dict):
-                result = reponse.get("result", [])
+            response = await telegram_api("getUpdates", offset=offset, allowed_updates=['message', 'edited_message', 'message_reaction','chat_join_request', 'chat_member'])
+            if isinstance(response, dict):
+                result = response.get("result", [])
                 for update in result:
                     try:
+                        message = update.get("message", update.get("edited_message"))
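The hunk cuts off before the loop advances offset; with Telegram's getUpdates long polling, each processed update is normally acknowledged by requesting update_id + 1 on the next call. A rough sketch of that pattern, with telegram_api and handle_update standing in for the repository's own functions:

# Illustrative only: telegram_api and handle_update are placeholders here.
async def poll_updates():
    offset = 0
    while True:
        response = await telegram_api("getUpdates", offset=offset,
                                      allowed_updates=['message', 'edited_message'])
        if isinstance(response, dict):
            for update in response.get("result", []):
                await handle_update(update)
                offset = update["update_id"] + 1  # next poll skips this update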
nlp/replying.py (deleted)
@@ -1,59 +0,0 @@
-# "👍", "👎", "❤", "🔥", "🥰", "👏", "😁",
-# "🤔", "🤯", "😱", "🤬", "😢", "🎉", "🤩",
-# "🤮", "💩", "🙏", "👌", "🕊", "🤡", "🥱",
-# "🥴", "😍", "🐳", "❤🔥", "🌚", "🌭", "💯",
-# "🤣", "⚡", "🍌", "🏆", "💔", "🤨", "😐",
-# "🍓", "🍾", "💋", "🖕", "😈", "😴", "😭",
-# "🤓", "👻", "👨💻", "👀", "🎃", "🙈", "😇",
-# "😨", "🤝", "✍", "🤗", "🫡", "🎅", "🎄",
-# "☃", "💅", "🤪", "🗿", "🆒", "💘", "🙉",
-# "🦄", "😘", "💊", "🙊", "😎", "👾", "🤷♂",
-# "🤷", "🤷♀", "😡"
-
-toxic_reactions = {
-    "071": "🕊",
-    "073": "👀",
-    "075": "🙈",
-    "077": "🙊",
-    "079": "🙏",
-    "081": "🤔",
-    "083": "😐",
-    "085": "🤨",
-    "087": "🥴",
-    "089": "🤯",
-    "091": "😢",
-    "093": "😭",
-    "095": "😨",
-    "097": "😱",
-    "099": "🤬"
-}
-
-grads = list(toxic_reactions.keys())
-grads.sort()
-grads.reverse()
-
-abusive_reactions = {
-    "085": "🫡",
-    "088": "💅",
-    "091": "🤷♀",
-    "094": "👾",
-    "097": "👻",
-    "099": "😈"
-}
-
-abusive_grads = list(abusive_reactions.keys())
-abusive_grads.sort()
-abusive_grads.reverse()
-
-def get_toxic_reply(tx):
-    percentage = tx * 100
-    for key in grads:
-        if percentage > int(key):
-            return toxic_reactions[key]
-
-
-def get_abusive_reply(tx):
-    percentage = tx * 100
-    for key in abusive_grads:
-        if percentage > int(key):
-            return abusive_reactions[key]
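The removed get_toxic_reply walks the thresholds from highest to lowest and returns the reaction for the first key the score exceeds; scores at or below 0.71 fall through and return None. A self-contained recap of that lookup with a trimmed table (not the full one above):

# Same selection logic as the deleted helper, shortened table for illustration.
toxic_reactions = {"071": "🕊", "093": "😭", "099": "🤬"}
grads = sorted(toxic_reactions, reverse=True)

def get_toxic_reply(tx):
    percentage = tx * 100
    for key in grads:
        if percentage > int(key):
            return toxic_reactions[key]

print(get_toxic_reply(0.94))  # 😭  (94 exceeds 93 but not 99)
print(get_toxic_reply(0.50))  # None: below every threshold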
nlp/toxicity.py (deleted)
@@ -1,31 +0,0 @@
-import torch
-from transformers import AutoTokenizer, \
-    AutoModelForSequenceClassification
-
-tiny_tox_model_path = 'cointegrated/rubert-tiny-toxicity'
-tiny_tox_tokenizer = AutoTokenizer.from_pretrained(tiny_tox_model_path)
-tiny_tox_model = AutoModelForSequenceClassification.from_pretrained(
-    tiny_tox_model_path)
-
-
-# if torch.cuda.is_available():
-#     model.cuda()
-
-
-def text2toxicity(text, aggregate=True) -> float:
-    """ Calculate toxicity of a text (if aggregate=True)
-    or a vector of toxicity aspects (if aggregate=False)"""
-    proba = 0.0
-    with torch.no_grad():
-        inputs = tiny_tox_tokenizer(
-            text.lower(),
-            return_tensors='pt',
-            truncation=True,
-            padding=True
-        ).to(tiny_tox_model.device)
-        proba = torch.sigmoid(tiny_tox_model(**inputs).logits).cpu().numpy()
-    if isinstance(text, str):
-        proba = proba[0]
-    if aggregate:
-        return 1 - proba.T[0] * (1 - proba.T[-1])
-    return float(proba)
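For context on the removed helper: the classifier returns one sigmoid probability per label, and the aggregate branch folds them into a single score, 1 - proba[0] * (1 - proba[-1]), i.e. high when the text is unlikely to be non-toxic or likely to fall under the last (most severe) label, as described on the rubert-tiny-toxicity model card. A usage sketch, assuming the module and its torch/transformers dependencies (also dropped from requirements.txt below) were still in place:

# Hypothetical usage of the removed module; needs torch and transformers installed.
from nlp.toxicity import text2toxicity

score = text2toxicity("some sample text")  # aggregate toxicity in [0, 1]
print(f"toxicity: {score:.2f}")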
requirements.txt
@@ -1,5 +1,2 @@
-torch
-transformers
-transliterate
 aiohttp
 redis[hiredis]