Update to much better code and features

main
pluja 2023-04-19 18:14:20 +02:00
parent 6502efa131
commit 486a6157a9
7 changed files with 298 additions and 294 deletions

.env.example 100644

@@ -0,0 +1,14 @@
OPENAI_API_KEY=your-openai-api-key
OPENAI_MODEL=gpt-3.5-turbo
CHATGPT_SYSTEM_PROMPT=You are a helpful assistant. Always use Markdown for formatting.
CHATGPT_MAX_USER_CONTEXT=5
CHATGPT_TEMPERATURE=1.0
# Use Whisper transcript from voice message with ChatGPT
WHISPER_TO_CHAT=1
# Use Google TTS for text to speech
ENABLE_GOOGLE_TTS=0
BOT_TOKEN=your-telegram-bot-token
BOT_ALLOWED_USERS=XXXX,YYYY # Comma-separated list of Telegram user IDs
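For reference, a minimal sketch of how these variables are read and parsed with python-dotenv, mirroring what main.py further down does (the defaults here are illustrative):

import os
from dotenv import load_dotenv

load_dotenv()  # pick up the .env file from the working directory

if os.environ.get("OPENAI_API_KEY") is None:
    raise SystemExit("OPENAI_API_KEY is not set")

# "XXXX,YYYY" becomes ["XXXX", "YYYY"]; a single "*" entry allows everyone.
allowed_users = os.environ.get("BOT_ALLOWED_USERS", "*").split(",")
temperature = float(os.environ.get("CHATGPT_TEMPERATURE", "1.0"))
whisper_to_chat = bool(int(os.environ.get("WHISPER_TO_CHAT", "0")))
enable_google_tts = bool(int(os.environ.get("ENABLE_GOOGLE_TTS", "0")))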

Dockerfile

@@ -1,8 +1,14 @@
FROM python:3-bullseye
FROM python:3.10-slim
RUN apt update && apt install -y ffmpeg
RUN apt update && apt install -y ffmpeg libespeak1
WORKDIR /app
COPY ./*.py /app
COPY ./main.py /app
COPY ./database.py /app
COPY ./requirements.txt /app
RUN mkdir db_data
RUN pip install --upgrade pip
RUN pip install -r requirements.txt
CMD [ "python3", "/app/main.py" ]

README.md

@@ -7,16 +7,20 @@ A Telegram bot to interact with the OpenAI API. You can:
- Generate images with DALL-E: `/imagine`
- Chat with ChatGPT: Just chat!
- Transcribe audio and video to text: Just send a voice message or a video file!
- Voice chat with ChatGPT (a condensed sketch follows this feature list):
  - Send voice messages.
  - Receive voice replies.
  - Use GoogleTTS or 100% local Espeak (more robotic).
Other features include:
- Talk to ChatGPT with audio transcriptions (whisper).
- Clear ChatGPT context history (to save tokens).
- Reply to any message to use it as context for ChatGPT.
- Per-user context and usage metrics and spent $.
- No database, data is saved in-memory.
- A drawback of this is that data is reset on each docker restart. Will look into solutions for this.
- Lightweight: a single python file.
- Per-user context.
- See usage metrics and spent $.
- Persistent data with sqlite3 database.
- Lightweight: few lines of code.
[Jump to selfhosting guide](#self-hosting)
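A condensed sketch of that voice round trip, with a hypothetical ask_chatgpt helper standing in for the bot's messageGPT (see main.py below for the real handlers):

import openai
from gtts import gTTS

def ask_chatgpt(prompt: str) -> str:
    # Hypothetical stand-in for messageGPT below (no context handling).
    resp = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
    )
    return resp["choices"][0]["message"]["content"]

def voice_round_trip(ogg_path: str) -> str:
    # Speech -> text with Whisper.
    with open(ogg_path, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)
    # Text -> reply -> speech with Google TTS.
    reply = ask_chatgpt(transcript["text"])
    gTTS(reply).save("reply.mp3")
    return "reply.mp3"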
@@ -43,13 +47,15 @@ Self-hosting this chatbot is pretty easy. You just need to follow these steps:
4. Setup the bot:
1. Clone this repo.
2. Rename the `example.docker-compose.yml` file to `docker-compose.yml`.
3. Edit the environment variables:
2. Rename the `example.env` file to `.env`.
3. Edit the environment variables from the `.env` file:
1. Set your OPENAI_API_KEY.
2. Set your BOT_TOKEN.
3. Set your BOT_ALLOWED_USERS (comma-separated user IDs). Set it to `*` to allow all users.
4. Set the CHATGPT_SYSTEM_PROMPT for ChatGPT. This is always sent to ChatGPT as the system message.
5. Optional: edit CHATGPT_MAX_USER_CONTEXT. This variable sets the number of messages that will be sent to the ChatGPT API as context for the conversation.
6. WHISPER_TO_CHAT lets you choose whether Whisper transcripts should be sent to ChatGPT or not.
7. ENABLE_GOOGLE_TTS: when enabled, TTS is provided by GoogleTTS, producing more natural voices.
4. Build and start the bot: `docker compose up --build -d`.
5. Enjoy!

database.py

@@ -2,7 +2,7 @@ import sqlite3
import json
def init_database():
conn = sqlite3.connect("users.db")
conn = sqlite3.connect("/app/db_data/users.db")
c = conn.cursor()
c.execute("""
CREATE TABLE IF NOT EXISTS users (
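The hunk is truncated here, so the full users schema isn't visible. For orientation, a minimal sketch of how the database.get_user / database.update_user helpers that main.py calls might look, assuming a hypothetical layout where per-user state is stored as a JSON blob (the real column names may differ):

import json
import sqlite3

DB_PATH = "/app/db_data/users.db"

def get_user(chat_id):
    # Return the stored dict for this chat, or None if unknown.
    conn = sqlite3.connect(DB_PATH)
    row = conn.execute("SELECT data FROM users WHERE chat_id = ?",
                       (str(chat_id),)).fetchone()
    conn.close()
    return json.loads(row[0]) if row else None

def update_user(chat_id, user_data):
    # Upsert the dict back as JSON.
    conn = sqlite3.connect(DB_PATH)
    conn.execute("INSERT OR REPLACE INTO users (chat_id, data) VALUES (?, ?)",
                 (str(chat_id), json.dumps(user_data)))
    conn.commit()
    conn.close()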

example.docker-compose.yml

@@ -1,7 +1,8 @@
version: '3.9'
services:
chatbot:
build: .
build:
dockerfile: Dockerfile
environment:
OPENAI_API_KEY: "XXX"
OPENAI_MODEL: gpt-3.5-turbo

main.py

@@ -1,26 +1,24 @@
import os
import openai
import logging
import database
from dotenv import load_dotenv
from pydub import AudioSegment
from telegram import Update
import os
import tempfile
from functools import wraps
from telegram.constants import ChatAction
from functools import wraps
from telegram.ext import ApplicationBuilder, CommandHandler, ContextTypes, MessageHandler, filters, CallbackQueryHandler
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from io import BytesIO
language_models = {
"en": "tts_models/multilingual/multi-dataset/your_tts",
"fr": "tts_models/multilingual/multi-dataset/your_tts",
"pt": "tts_models/multilingual/multi-dataset/your_tts",
"pt-br": "tts_models/multilingual/multi-dataset/your_tts",
"es": "tts_models/es/css10/vits",
}
import openai
from aiogram import Bot, Dispatcher, types
from aiogram.contrib.middlewares.logging import LoggingMiddleware
from aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup, ParseMode
from aiogram.types.input_file import InputFile
from aiogram.utils import executor
from dotenv import load_dotenv
from gtts import gTTS
import pyttsx3
from pydub import AudioSegment
import database
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.INFO
)
logger = logging.getLogger(__name__)
@@ -30,16 +28,23 @@ load_dotenv()
if os.environ.get("OPENAI_API_KEY") is None:
print("OpenAI_API_KEY is not set in.env file or OPENAI_API_KEY environment variable is not set")
exit(1)
ALLOWED_USERS=os.environ.get("BOT_ALLOWED_USERS").split(",")
SYSTEM_PROMPT=os.environ.get("CHATGPT_SYSTEM_PROMPT")
TEMPERATURE=os.environ.get("CHATGPT_TEMPERATURE")
MODEL=os.environ.get("OPENAI_MODEL")
WHISPER_TO_CHAT=bool(int(os.environ.get("WHISPER_TO_CHAT")))
MAX_USER_CONTEXT=int(os.environ.get("CHATGPT_MAX_USER_CONTEXT"))
BOT_TOKEN = os.getenv("BOT_TOKEN")
bot = Bot(token=BOT_TOKEN)
dp = Dispatcher(bot)
dp.middleware.setup(LoggingMiddleware())
ALLOWED_USERS = os.environ.get("BOT_ALLOWED_USERS").split(",")
SYSTEM_PROMPT = os.environ.get("CHATGPT_SYSTEM_PROMPT")
TEMPERATURE = os.environ.get("CHATGPT_TEMPERATURE")
MODEL = os.environ.get("OPENAI_MODEL")
WHISPER_TO_CHAT = bool(int(os.environ.get("WHISPER_TO_CHAT")))
ENABLE_GOOGLE_TTS = bool(int(os.environ.get("ENABLE_GOOGLE_TTS")))
MAX_USER_CONTEXT = int(os.environ.get("CHATGPT_MAX_USER_CONTEXT"))
openai.api_key = os.environ.get("OPENAI_API_KEY")
async def getUserData(chat_id):
# Initialize user if not present
async def getUserData(chat_id):
user_data = database.get_user(chat_id)
if not user_data:
user_data = {
@@ -56,28 +61,79 @@ async def getUserData(chat_id):
user_data = database.get_user(chat_id)
return user_data
def generate_settings_markup(chat_id: str) -> InlineKeyboardMarkup:
keyboard = [
[
InlineKeyboardButton("Increase Temperature", callback_data=f"setting_inc_temp_{chat_id}"),
InlineKeyboardButton("Decrease Temperature", callback_data=f"setting_dec_temp_{chat_id}")
],
[
InlineKeyboardButton("Enable Whisper", callback_data=f"setting_en_whisper_{chat_id}"),
InlineKeyboardButton("Disable Whisper", callback_data=f"setting_dis_whisper_{chat_id}")
],
[
InlineKeyboardButton("Enable assistant voice", callback_data=f"setting_en_voice_{chat_id}"),
InlineKeyboardButton("Disable assistant voice", callback_data=f"setting_dis_voice_{chat_id}")
],
[
InlineKeyboardButton("Increase Context", callback_data=f"setting_inc_context_{chat_id}"),
InlineKeyboardButton("Decrease Context", callback_data=f"setting_dec_context_{chat_id}")
]
]
return InlineKeyboardMarkup(inline_keyboard=keyboard)
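# Illustrative aside (editor's sketch, not part of the bot): each callback_data
# built above encodes the chat id as the last "_"-separated field, and
# settings_callback further down recovers both pieces with rsplit, which is
# why every branch there matches on action.startswith(...).
example_action, example_chat_id = "setting_inc_temp_12345".rsplit("_", 1)
assert example_action == "setting_inc_temp"
assert example_chat_id == "12345"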
async def text_to_voice(text: str) -> BytesIO:
if ENABLE_GOOGLE_TTS:
tts = gTTS(text)
with tempfile.NamedTemporaryFile(mode='wb', suffix='.mp3', delete=False) as mp3_file:
temp_mp3_filename = mp3_file.name
tts.save(temp_mp3_filename)
else:
engine = pyttsx3.init() # PyTTSX3 Engine
engine.setProperty('rate', 150)
with tempfile.NamedTemporaryFile(mode='wb', suffix='.mp3', delete=False) as mp3_file:
temp_mp3_filename = mp3_file.name
engine.save_to_file(text, temp_mp3_filename)
engine.runAndWait()
mp3_audio = AudioSegment.from_file(temp_mp3_filename, format="mp3")
with tempfile.NamedTemporaryFile(mode="wb", suffix=".ogg", delete=False) as ogg_file:
temp_ogg_filename = ogg_file.name
mp3_audio.export(temp_ogg_filename, format="ogg")
with open(temp_ogg_filename, "rb") as audio_file:
_ = InputFile(audio_file)
voice_data = BytesIO(audio_file.read())
os.remove(temp_mp3_filename)
os.remove(temp_ogg_filename)
voice_data.seek(0)
return voice_data
def restricted(func):
@wraps(func)
async def wrapped(update, context, *args, **kwargs):
if str(update.effective_user.id) not in ALLOWED_USERS:
async def wrapped(message, *args, **kwargs):
user_id = str(message.chat.id)
if user_id not in ALLOWED_USERS:
if "*" != ALLOWED_USERS[0]:
print(f"Unauthorized access denied for {update.effective_user.id}.")
print(f"Unauthorized access denied for {user_id}.")
return
else:
_ = await getUserData(update.effective_chat.id)
return await func(update, context, *args, **kwargs)
_ = await getUserData(user_id)
return await func(message, *args, **kwargs)
return wrapped
async def messageGPT(text: str, chat_id: str, user_name="User"):
user_data = await getUserData(chat_id)
# Update context
async def messageGPT(text: str, chat_id: str, user_name="User"):
await bot.send_chat_action(chat_id, action=types.ChatActions.TYPING)
user_data = await getUserData(chat_id)
user_data['context'].append({"role": "user", "content": text})
if len(user_data['context']) > user_data["options"]["max-context"]:
user_data['context'].pop(0)
# Interact with ChatGPT API and stream the response
response = None
try:
response = openai.ChatCompletion.create(
model=MODEL,
@@ -86,164 +142,41 @@ async def messageGPT(text: str, chat_id: str, user_name="User"):
)
except Exception as e:
print(e)
return "There was a problem with OpenAI, so I can't answer you."
return f"There was a problem with OpenAI, so I can't answer you: \n\n{e}"
# Initialize variables for streaming
assistant_message = ""
if 'choices' in response:
assistant_message = response['choices'][0]['message']['content']
else:
assistant_message = "There was a problem with OpenAI. Maybe your prompt is forbidden? They like to censor a lot!"
assistant_message = response.get('choices', [{}])[0].get('message', {"content": None}).get("content", "There was a problem with OpenAI. Maybe your prompt is forbidden? They like to censor a lot!")
# Update context
user_data['context'].append({"role": "assistant", "content": assistant_message})
if len(user_data['context']) > user_data["options"]["max-context"]:
user_data['context'].pop(0)
# Update usage
user_data["usage"]['chatgpt'] += int(response['usage']['total_tokens'])
# Update the user data in the database
user_data["usage"]['chatgpt'] += int(response.get('usage', {"total_tokens": 0})["total_tokens"])
database.update_user(chat_id, user_data)
return assistant_message
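# Illustrative aside (editor's sketch, not part of the bot): the sliding
# context window above, with max-context = 3, drops the oldest message as
# soon as a new one is appended.
example_ctx = [{"role": "user", "content": "a"},
               {"role": "assistant", "content": "b"},
               {"role": "user", "content": "c"}]
example_ctx.append({"role": "assistant", "content": "d"})
if len(example_ctx) > 3:  # same check messageGPT performs
    example_ctx.pop(0)
assert [m["content"] for m in example_ctx] == ["b", "c", "d"]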
@restricted
async def start(update: Update, context: ContextTypes.DEFAULT_TYPE):
_ = await getUserData(update.effective_chat.id)
await context.bot.send_message(chat_id=update.effective_chat.id, text="Hello, how can I assist you today?")
@restricted
async def imagine(update: Update, context: ContextTypes.DEFAULT_TYPE):
user_data = await getUserData(update.effective_chat.id)
user_data["usage"]['dalle'] += 1
database.update_user(update.effective_chat.id, user_data)
await context.bot.send_chat_action(chat_id=update.effective_chat.id, action=ChatAction.TYPING)
response = openai.Image.create(
prompt=update.message.text,
n=1,
size="1024x1024"
)
try:
image_url = response['data'][0]['url']
await context.bot.send_message(chat_id=update.effective_chat.id, text=image_url)
except Exception as e:
print(e)
await context.bot.send_message(chat_id=update.effective_chat.id, text="Error generating. Your prompt may contain text that is not allowed by OpenAI safety system.")
@dp.message_handler(commands=['start'])
@restricted
async def attachment(update: Update, context: ContextTypes.DEFAULT_TYPE):
# Initialize variables
chat_id = update.effective_chat.id
await context.bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)
async def start(message: types.Message):
_ = await getUserData(message.chat.id)
await message.reply("Hello, how can I assist you today?")
# Get user data or initialize if not present
@dp.message_handler(commands=['clear'], content_types=['text'])
@restricted
async def clear(message: types.Message) -> None:
chat_id = str(message.chat.id)
user_data = await getUserData(chat_id)
#users[f"{chat_id}"]["usage"]['whisper'] = 0
transcript = {'text': ''}
audioMessage = False
# Check if the attachment is a voice message
if update.message.voice:
user_data["usage"]['whisper'] += update.message.voice.duration
file_id = update.message.voice.file_id
file_format = "ogg"
audioMessage = True
# Check if the attachment is a video
elif update.message.video:
user_data["usage"]['whisper'] += update.message.video.duration
file_id = update.message.video.file_id
file_format = "mp4"
# Check if the attachment is an audio file
elif update.message.audio:
user_data["usage"]['whisper'] += update.message.audio.duration
file_id = update.message.audio.file_id
file_format = "mp3"
else:
await context.bot.send_message(chat_id=chat_id, text="Can't handle such file. Reason: unknown.")
return
# Download the file and convert it if necessary
file = await context.bot.get_file(file_id)
user_id = update.effective_user.id
await file.download_to_drive(f"{user_id}.{file_format}")
if file_format == "ogg":
ogg_audio = AudioSegment.from_file(f"{user_id}.ogg", format="ogg")
ogg_audio.export(f"{user_id}.mp3", format="mp3")
os.remove(f"{user_id}.ogg")
file_format = "mp3"
# Transcribe the audio
with open(f"{user_id}.{file_format}", "rb") as audio_file:
try:
transcript = openai.Audio.transcribe("whisper-1", audio_file)
except Exception as e:
print(e)
await context.bot.send_message(chat_id=chat_id, text="Transcript failed.")
os.remove(f"{user_id}.{file_format}")
return
os.remove(f"{user_id}.{file_format}")
# Send the transcript
if transcript['text'] == "":
transcript['text'] = "[Silence]"
if audioMessage and user_data["options"]["whisper_to_chat"]:
chatGPT_response = await messageGPT(transcript['text'], str(chat_id), update.effective_user.name)
transcript['text'] = "> " + transcript['text'] + "\n\n" + chatGPT_response
# Check if the transcript length is longer than 4095 characters
if len(transcript['text']) > 4095:
# Split the transcript into multiple messages without breaking words in half
max_length = 4096
words = transcript['text'].split()
current_message = ""
for word in words:
if len(current_message) + len(word) + 1 > max_length:
await context.bot.send_message(chat_id=chat_id, text=current_message)
current_message = ""
current_message += f"{word} "
if current_message:
await context.bot.send_message(chat_id=chat_id, text=current_message)
else:
await context.bot.send_message(chat_id=chat_id, text=transcript['text'])
# Update user data in the database
database.update_user(str(chat_id), user_data)
@restricted
async def chat(update: Update, context: ContextTypes.DEFAULT_TYPE):
chat_id = str(update.effective_chat.id)
await context.bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)
# Check if replying and add context
if hasattr(update.message.reply_to_message, "text"):
user_prompt = f"In reply to: '{update.message.reply_to_message.text}' \n---\n{update.message.text}"
else:
user_prompt = update.message.text
# Use messageGPT function to get the response
assistant_message = await messageGPT(user_prompt, chat_id, update.effective_user.name)
await context.bot.send_message(chat_id=update.effective_chat.id, text=assistant_message)
@restricted
async def clear(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
user_data = await getUserData(update.effective_chat.id)
if user_data:
user_data["context"] = []
database.update_user(str(update.effective_chat.id), user_data)
print(f"Cleared context for {update.effective_user.name}")
await update.message.reply_text('Your message context history was cleared.')
database.update_user(chat_id, user_data)
print(f"Cleared context for {message.from_user.full_name}")
await message.reply("Your message context history was cleared.")
@dp.message_handler(commands=['usage'])
@restricted
async def usage(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
chat_id = str(update.effective_chat.id)
async def usage(message: types.Message) -> None:
chat_id = str(message.chat.id)
user_data = database.get_user(chat_id)
user_usage = user_data["usage"]
total_usage = database.get_total_usage()
@@ -253,7 +186,7 @@ async def usage(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
user_percentage = (user_spent / total_spent) * 100 if total_spent > 0 else 0
info_message = f"""User: {update.effective_user.name}
info_message = f"""User: {message.from_user.full_name}
- Used ~{user_usage["chatgpt"]} tokens with ChatGPT.
- Generated {user_usage["dalle"]} images with DALL-E.
- Transcribed {round(float(user_usage["whisper"]) / 60.0, 2)}min with Whisper.
@@ -267,123 +200,165 @@ Total usage:
Total spent: ${total_spent}"""
await context.bot.send_message(chat_id=update.effective_chat.id, text=info_message)
await message.reply(info_message)
@dp.message_handler(lambda message: message.chat.type == types.ChatType.PRIVATE, content_types=['text'], regexp='^/imagine')
@restricted
async def imagine(message: types.Message):
await bot.send_chat_action(message.chat.id, action=types.ChatActions.TYPING)
user_data = await getUserData(message.chat.id)
user_data["usage"]['dalle'] += 1
database.update_user(message.chat.id, user_data)
response = openai.Image.create(
prompt=message.text,
n=1,
size="1024x1024"
)
try:
image_url = response['data'][0]['url']
await message.reply(image_url)
except Exception as e:
print(e)
await message.reply("Error generating. Your prompt may contain text that is not allowed by OpenAI safety system.")
@dp.message_handler(content_types=['photo', 'video', 'audio', 'voice'])
@restricted
async def attachment(message: types.Message):
chat_id = message.chat.id
user_data = await getUserData(chat_id)
await bot.send_chat_action(chat_id, action=types.ChatActions.TYPING)
transcript = {'text': ''}
audioMessage = False
if message.voice:
user_data["usage"]['whisper'] += message.voice.duration
file_id = message.voice.file_id
file_format = "ogg"
audioMessage = True
elif message.video:
user_data["usage"]['whisper'] += message.video.duration
file_id = message.video.file_id
file_format = "mp4"
elif message.audio:
user_data["usage"]['whisper'] += message.audio.duration
file_id = message.audio.file_id
file_format = "mp3"
else:
await message.reply("Can't handle such file. Reason: unknown.")
return
file = await bot.get_file(file_id)
user_id = message.chat.full_name
await file.download(f"{user_id}.{file_format}")
if file_format == "ogg":
ogg_audio = AudioSegment.from_file(f"{user_id}.ogg", format="ogg")
ogg_audio.export(f"{user_id}.mp3", format="mp3")
os.remove(f"{user_id}.ogg")
file_format = "mp3"
with open(f"{user_id}.{file_format}", "rb") as audio_file:
try:
await bot.send_chat_action(chat_id, action=types.ChatActions.TYPING)
transcript = openai.Audio.transcribe("whisper-1", audio_file)
except Exception as e:
print(e)
await message.reply("Transcript failed.")
os.remove(f"{user_id}.{file_format}")
return
os.remove(f"{user_id}.{file_format}")
if transcript['text'] == "":
transcript['text'] = "[Silence]"
chatGPT_response = False
if audioMessage and user_data["options"]["whisper_to_chat"]:
chatGPT_response = await messageGPT(transcript['text'], str(chat_id), message.from_user.full_name)
transcript['text'] = "> " + transcript['text'] + "\n\n" + chatGPT_response
await message.reply(transcript['text'])
if user_data["options"]["assistant_voice_chat"] and chatGPT_response:
await bot.send_chat_action(chat_id, action=types.ChatActions.TYPING)
voice_data = await text_to_voice(chatGPT_response)
await message.reply_voice(voice_data)
database.update_user(str(chat_id), user_data)
@restricted
async def _help(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
help_message="""Here's what you can do:\n\n
- /imagine <prompt> to generate an image with DALL-E\n- Send a message to chat with ChatGPT\n
- Send an audio to transcribe to text with Whisper.\n\n
- /settings To change your settings.\n
- /usage To get your usage statistics.\n
- /clear To clear your ChatGPT message context (start a new chat).
await context.bot.send_message(chat_id=update.effective_chat.id, text=help_message)
# Function to generate the settings buttons
def generate_settings_markup(chat_id: str) -> InlineKeyboardMarkup:
keyboard = [
[
InlineKeyboardButton("Increase Temperature", callback_data=f"setting_increase_temperature_{chat_id}"),
InlineKeyboardButton("Decrease Temperature", callback_data=f"setting_decrease_temperature_{chat_id}")
],
[
InlineKeyboardButton("Enable Whisper to Chat", callback_data=f"setting_enable_whisper_{chat_id}"),
InlineKeyboardButton("Disable Whisper to Chat", callback_data=f"setting_disable_whisper_{chat_id}")
],
[
InlineKeyboardButton("Enable assistant voice", callback_data=f"setting_enable_voice_{chat_id}"),
InlineKeyboardButton("Disable assistant voice", callback_data=f"setting_disable_voice_{chat_id}")
],
[
InlineKeyboardButton("Increase Context", callback_data=f"setting_increase_context_{chat_id}"),
InlineKeyboardButton("Decrease Context", callback_data=f"setting_decrease_context_{chat_id}")
]
]
return InlineKeyboardMarkup(keyboard)
@restricted
async def settings(update: Update, context: ContextTypes.DEFAULT_TYPE):
chat_id = update.effective_chat.id
@dp.message_handler(commands=['settings'])
async def settings(message: types.Message):
chat_id = str(message.chat.id)
settings_markup = generate_settings_markup(chat_id)
await context.bot.send_message(chat_id=chat_id, text="Settings:", reply_markup=settings_markup)
await message.reply(text='Settings:', reply_markup=settings_markup)
async def settings_callback(update: Update, context: ContextTypes.DEFAULT_TYPE):
user_data = await getUserData(update.effective_chat.id)
query = update.callback_query
action, chat_id = query.data.rsplit("_", 1)
# Temperature
if action.startswith("setting_increase_temperature"):
user_data["options"]["temperature"] = min(user_data["options"]["temperature"] + 0.1, 1)
elif action.startswith("setting_decrease_temperature"):
user_data["options"]["temperature"] = max(user_data["options"]["temperature"] - 0.1, 0)
# Whisper to GPT
elif action.startswith("setting_enable_whisper"):
print(f"enabling whisper for {chat_id}")
user_data["options"]["whisper_to_chat"] = True
elif action.startswith("setting_disable_whisper"):
print(f"disabling whisper for {chat_id}")
user_data["options"]["whisper_to_chat"] = False
# TTS
elif action.startswith("setting_enable_voice"):
print(f"enabling voice for {chat_id}")
user_data["options"]["assistant_voice_chat"] = True
elif action.startswith("setting_disable_voice"):
print(f"disabling voice for {chat_id}")
user_data["options"]["assistant_voice_chat"] = False
# Context
elif action.startswith("setting_increase_context"):
user_data["options"]["max-context"] = min(user_data["options"]["max-context"] + 1, MAX_USER_CONTEXT)
elif action.startswith("setting_decrease_context"):
user_data["options"]["max-context"] = max(user_data["options"]["max-context"] - 1, 1)
async def settings_callback(callback_query: types.CallbackQuery):
user_data = await getUserData(callback_query.message.chat.id)
action, chat_id = callback_query.data.rsplit("_", 1)
options = user_data["options"]
if action.startswith("setting_inc_temp"):
options["temperature"] = min(options["temperature"] + 0.1, 1)
elif action.startswith("setting_dec_temp"):
options["temperature"] = max(options["temperature"] - 0.1, 0)
elif action.startswith("setting_en_whisper"):
options["whisper_to_chat"] = True
elif action.startswith("setting_dis_whisper"):
options["whisper_to_chat"] = False
elif action.startswith("setting_en_voice"):
options["assistant_voice_chat"] = True
elif action.startswith("setting_dis_voice"):
options["assistant_voice_chat"] = False
elif action.startswith("setting_inc_context"):
options["max-context"] = min(options["max-context"] + 1, MAX_USER_CONTEXT)
elif action.startswith("setting_dec_context"):
options["max-context"] = max(options["max-context"] - 1, 1)
settings_markup = generate_settings_markup(chat_id)
await query.edit_message_text(text="Choose a setting option:", reply_markup=settings_markup)
await callback_query.message.edit_text(text='Choose a setting option:', reply_markup=settings_markup)
# Remove the settings message
await context.bot.delete_message(chat_id=query.message.chat_id, message_id=query.message.message_id)
database.update_user(chat_id, user_data)
settings_txt = f"Updated settings:\n\nTemperature: {options['temperature']}\nWhisper to Chat: {options['whisper_to_chat']}\nAssistant voice: {options['assistant_voice_chat']}\nContext Length: {options['max-context']}"
await callback_query.answer()
await callback_query.message.reply(text=settings_txt)
# Send a message displaying the updated settings
settings_message = f"""Updated settings:\n\nTemperature: {user_data['options']['temperature']}\nWhisper to Chat: {user_data['options']['whisper_to_chat']}\nAssistant voice: {user_data['options']['assistant_voice_chat']}\nContext Length: {user_data["options"]["max-context"]}"""
await context.bot.send_message(chat_id=chat_id, text=settings_message)
@dp.message_handler(lambda message: message.chat.type == types.ChatType.PRIVATE and not message.text.startswith("/"), content_types=['text'])
async def chat(message: types.Message):
chat_id = str(message.chat.id)
user_prompt = message.text
await bot.send_chat_action(chat_id, action=types.ChatActions.TYPING)
assistant_message = await messageGPT(user_prompt, chat_id, message.from_user.full_name)
await message.reply(assistant_message, parse_mode=ParseMode.MARKDOWN)
user_data = await getUserData(chat_id)
if user_data["options"]["assistant_voice_chat"]:
await bot.send_chat_action(chat_id, action=types.ChatActions.TYPING)
voice_data = await text_to_voice(assistant_message)
await message.reply_voice(voice_data)
if __name__ == '__main__':
database.init_database()
try:
ALLOWED_USERS=os.environ.get("BOT_ALLOWED_USERS").split(",")
ALLOWED_USERS = os.environ.get("BOT_ALLOWED_USERS").split(",")
except (Exception):
ALLOWED_USERS=ALLOWED_USERS
ALLOWED_USERS = ALLOWED_USERS
print(f"Allowed users: {ALLOWED_USERS}")
print(f"System prompt: {SYSTEM_PROMPT}")
print(f"Google TTS: {ENABLE_GOOGLE_TTS}")
application = ApplicationBuilder().token(os.environ.get("BOT_TOKEN")).build()
start_handler = CommandHandler('start', start)
application.add_handler(start_handler)
clear_handler = CommandHandler('clear', clear)
application.add_handler(clear_handler)
info_handler = CommandHandler('usage', usage)
application.add_handler(info_handler)
help_handler = CommandHandler('help', _help)
application.add_handler(help_handler)
imagine_handler = CommandHandler('imagine', imagine)
application.add_handler(imagine_handler)
settings_handler = CommandHandler('settings', settings)
application.add_handler(settings_handler)
application.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, chat))
application.add_handler(MessageHandler(filters.ATTACHMENT & ~filters.COMMAND, attachment))
settings_callback_handler = CallbackQueryHandler(settings_callback)
application.add_handler(settings_callback_handler)
application.run_polling()
# Register message handler and callback query handler for settings
dp.register_message_handler(settings, commands=['settings'])
dp.register_callback_query_handler(settings_callback, lambda c: c.data.startswith('setting_'))
executor.start_polling(dp, skip_updates=True)

requirements.txt

@@ -1,4 +1,6 @@
aiogram==2.25.1
gTTS==2.3.1
openai==0.27.2
pydub==0.25.1
python-dotenv==1.0.0
python-telegram-bot==20.2
pyttsx3==2.90