Use a typing indicator instead of streaming, since streaming the response causes a lot of issues with Telegram rate limits.
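For context, this is the shape the handler moves to: one ChatAction.TYPING hint while a single blocking completion call runs, then one sendMessage with the full reply. Telegram throttles bots to roughly one message per second per chat, and every editMessageText from the old streaming loop counted against that same budget, hence the RetryAfter churn. A minimal standalone sketch, assuming python-telegram-bot v20 and the pre-1.0 openai SDK; reply_once, MODEL, and SYSTEM_PROMPT are illustrative names, not the repo's actual handler:

```python
import asyncio

import openai
from telegram import Update
from telegram.constants import ChatAction
from telegram.ext import ContextTypes

# Stand-ins for the values the repo loads from .env (assumed placeholders).
MODEL = "gpt-3.5-turbo"
SYSTEM_PROMPT = "You are a helpful assistant."

async def reply_once(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    # Telegram shows the typing indicator for ~5 seconds or until the bot
    # sends a message, so one action right before the API call is enough.
    await context.bot.send_chat_action(
        chat_id=update.effective_chat.id, action=ChatAction.TYPING
    )
    # One non-streaming completion in a worker thread (the pre-1.0 SDK call
    # is blocking): a single round trip, then a single sendMessage, with no
    # burst of editMessageText calls for Telegram to rate-limit.
    response = await asyncio.to_thread(
        openai.ChatCompletion.create,
        model=MODEL,
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": update.message.text},
        ],
    )
    await context.bot.send_message(
        chat_id=update.effective_chat.id,
        text=response["choices"][0]["message"]["content"],
    )
```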

main
pluja 2023-03-23 09:14:32 +01:00
parent 5b1b14b910
commit be0b0d9c4a
1 changed file with 21 additions and 46 deletions

main.py (67 changed lines)

@@ -8,7 +8,9 @@ from dotenv import load_dotenv
 from pydub import AudioSegment
 from telegram import Update
-from functools import wraps
-from telegram.error import BadRequest, RetryAfter
+from telegram.constants import ChatAction
+from functools import wraps
+from telegram.error import BadRequest, RetryAfter, TimedOut
 from telegram.ext import ApplicationBuilder, CommandHandler, ContextTypes, MessageHandler, filters
 logging.basicConfig(
@@ -63,6 +65,8 @@ async def start(update: Update, context: ContextTypes.DEFAULT_TYPE):
 @restricted
 async def imagine(update: Update, context: ContextTypes.DEFAULT_TYPE):
     users[f"{update.effective_chat.id}"]["usage"]['dalle'] += 1
+    await context.bot.send_chat_action(chat_id=update.effective_chat.id, action=ChatAction.TYPING)
     response = openai.Image.create(
         prompt=update.message.text,
         n=1,
@@ -78,7 +82,7 @@ async def imagine(update: Update, context: ContextTypes.DEFAULT_TYPE):
 async def attachment(update: Update, context: ContextTypes.DEFAULT_TYPE):
     # Initialize variables
     chat_id = update.effective_chat.id
-    user_id = update.effective_user.id
+    await context.bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)
     users[f"{chat_id}"]["usage"]['whisper'] = 0
     transcript = {'text': ''}
@@ -148,6 +152,7 @@ async def attachment(update: Update, context: ContextTypes.DEFAULT_TYPE):
 @restricted
 async def chat(update: Update, context: ContextTypes.DEFAULT_TYPE):
     chat_id = str(update.effective_chat.id)
+    await context.bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)
     # Initialize user if not present
     if chat_id not in users:
@@ -166,60 +171,30 @@ async def chat(update: Update, context: ContextTypes.DEFAULT_TYPE):
         user_context.pop(0)
-    # Interact with ChatGPT API and stream the response
-    response = openai.ChatCompletion.create(
-        model=MODEL,
-        messages=[{"role": "system", "content": SYSTEM_PROMPT}] + user_context,
-        stream=True,
-        temperature=float(TEMPERATURE)
-    )
+    try:
+        response = openai.ChatCompletion.create(
+            model=MODEL,
+            messages=[{"role": "system", "content": SYSTEM_PROMPT}] + user_context,
+            temperature=float(TEMPERATURE)
+        )
+    except:
+        await context.bot.send_message(chat_id=update.effective_chat.id, text="There was a problem with OpenAI, so I can't answer you.")
-    # Initialize variables for streaming
-    assistant_message = ""
-    message_sent = False
-    sent_message = None
-    # Process response chunks
-    batch = 3 # Batches of 10 to update
-    for chunk in response:
-        batch -= 1
-        if 'choices' in chunk:
-            choice = chunk['choices'][0]
-            if 'delta' in choice and 'content' in choice['delta']:
-                new_content = choice['delta']['content']
-                assistant_message += new_content
+    if 'choices' in response:
+        assistant_message = response['choices'][0]['message']['content']
+        await context.bot.send_message(chat_id=update.effective_chat.id, text=assistant_message)
+    else:
+        await context.bot.send_message(chat_id=update.effective_chat.id, text="There was a problem with OpenAI. Maybe your prompt is forbidden? They like to censor a lot!")
-                # Edit the message in real time
-                if not message_sent:
-                    sent_message = await context.bot.send_message(chat_id=update.effective_chat.id, text=assistant_message)
-                    message_sent = True
-                else:
-                    if batch == 0:
-                        try:
-                            if new_content.strip() != "":
-                                await context.bot.edit_message_text(chat_id=update.effective_chat.id, message_id=sent_message.message_id, text=assistant_message)
-                        except BadRequest as e:
-                            if "Message is not modified" not in str(e):
-                                raise e
-                        except RetryAfter as e:
-                            await asyncio.sleep(e.retry_after)
-                        batch = 3
-    # Final check to ensure the entire message is displayed
-    try:
-        await context.bot.edit_message_text(chat_id=update.effective_chat.id, message_id=sent_message.message_id, text=assistant_message)
-    except BadRequest as e:
-        if "Message is not modified" not in str(e):
-            raise e
-    except RetryAfter as e:
-        await asyncio.sleep(e.retry_after)
     # Update context
     user_context.append({"role": "assistant", "content": assistant_message})
     if len(user_context) > MAX_USER_CONTEXT:
         user_context.pop(0)
     # Update usage
-    users[chat_id]["usage"]['chatgpt'] += round(len(str(user_context))/3, 0)
+    users[chat_id]["usage"]['chatgpt'] += int(response['usage']['total_tokens'])
 @restricted
 async def clear(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: