diff --git a/__init__.py b/__init__.py index e2d23ba..486b507 100644 --- a/__init__.py +++ b/__init__.py @@ -11,7 +11,7 @@ bl_info = { "author": "Ryvn (@hc-psy) (@@hao-chenglo2049)", "description": "", "blender": (2, 82, 0), - "version": (0, 0, 1), + "version": (0, 0, 2), "warning": "", "category": "Object" } diff --git a/gpt_cst.py b/gpt_cst.py new file mode 100644 index 0000000..94a3094 --- /dev/null +++ b/gpt_cst.py @@ -0,0 +1,206 @@ +SYSTEMPROMPTS = [ + """ + I am acting as a professional 3D artist skilled in scripting within Blender, the 3D software. My expertise lies in understanding the relationship between conceptual 3D models, animation ideas, and their corresponding Python scripts in Blender. + Here are the guidelines for our interaction: + - I will only respond with Python code relevant to Blender scripting, enclosed in markdown code blocks (```). + - My responses will focus on importing complete modules rather than individual components for scripting. + - I'll ensure to avoid destructive operations on meshes in my code suggestions. + - My responses will strictly adhere to the task at hand. I won't include additional setup like render settings or camera additions unless explicitly requested. + - My replies will consist solely of Python code without any additional explanations or comments. + - The input provided might be in English, Traditional Chinese and Simplified Chinese, and I will respond accordingly. + This setup is designed for a tailor-made Blender GPT to assist with 3D modeling and animation scripting in Python. + """, + """ + Here are a few examples to illustrate how I can assist: + 1. Task: Create 10 Cubes in Random Locations + - User Prompt Content: "Create 10 cubes in random locations from -1 to 1" + - Assistant Response Content: + ```python + import bpy + import random + + count = 10 + for _ in range(count): + x = random.uniform(-1, 1) + y = random.uniform(-1, 1) + z = random.uniform(-1, 1) + bpy.ops.mesh.primitive_cube_add(location=(x, y, z)) + ``` + + 2. Task: Delete All Mesh Objects and Create a Sphere + - User Prompt Content: Delete all mesh objects in the scene and create a 5x5x5 ball in the scene + - Assistant Response Content: + ```python + import bpy + + bpy.ops.object.select_all(action='DESELECT') + bpy.ops.object.select_by_type(type='MESH') + bpy.ops.object.delete() + + bpy.ops.mesh.primitive_uv_sphere_add(location=(0, 0, 0), radius=2.5) + ``` + + 3. 
Task: Create a Rigid Body Simulation with a Tower of Cubes + - User Prompt Content: "Create a rigid body sim with 10 1x1x1 cubes stacked as a tower that falls down onto a 20x20 plane" + - Assistant Response Content: + ```python + import bpy + + for i in range(10): + bpy.ops.mesh.primitive_cube_add(location=(0, 0, 1 + i)) + bpy.ops.rigidbody.object_add() + bpy.context.object.scale = (1, 1, 1) + + bpy.ops.mesh.primitive_plane_add(location=(0, 0, 0)) + bpy.ops.rigidbody.object_add() + bpy.context.object.rigid_body.type = 'PASSIVE' + bpy.context.object.scale = (20, 20, 1) + + bpy.context.scene.frame_end = 200 + ``` + """ +] + +UI = { + 'language': { + 'zh': '繁體中文', + 'cn': '简体中文', + 'en': 'English', + 'es': 'Español', + 'fr': 'Français', + }, + 'label_language': { + 'zh': '語言', + 'cn': '语言', + 'en': 'Language', + 'es': 'Idioma', + 'fr': 'Langue', + }, + 'label_model': { + 'zh': 'GPT 模型', + 'cn': 'GPT 模型', + 'en': 'GPT Model', + 'es': 'Modelo GPT', + 'fr': 'Modèle GPT', + }, + 'label_model_description': { + 'zh': '請選擇欲使用的GPT模型', + 'cn': '请选择要使用的GPT模型', + 'en': 'Please select the GPT model', + 'es': 'Por favor seleccione el modelo GPT', + 'fr': 'Veuillez sélectionner le modèle GPT', + }, + 'model_options': { + 'zh': { + 'gpt3.5': 'GPT-3.5 (便宜但較容易出錯)', + 'gpt4': 'GPT-4 (昂貴但較詳細準確)', + }, + 'cn': { + 'gpt3.5': 'GPT-3.5 (便宜但較容易出错)', + 'gpt4': 'GPT-4 (昂贵但较详细准确)', + }, + 'en': { + 'gpt3.5': 'GPT-3.5 (Affordable but less accurate)', + 'gpt4': 'GPT-4 (Expensive but more accurate)', + }, + 'es': { + 'gpt3.5': 'GPT-3.5 (Asequible pero menos preciso)', + 'gpt4': 'GPT-4 (Caro pero más preciso)', + }, + 'fr': { + 'gpt3.5': 'GPT-3.5 (Abordable mais moins précis)', + 'gpt4': 'GPT-4 (Coûteux mais plus précis)', + }, + }, + 'label_history': { + 'zh': '對話歷史紀錄', + 'cn': '对话历史纪录', + 'en': 'Chat History', + 'es': 'Historial de chat', + 'fr': 'Historique du chat', + }, + 'label_show_code': { + 'zh': '顯示程式碼', + 'cn': '显示代码', + 'en': 'Show Code', + 'es': 'Mostrar código', + 'fr': 'Afficher le code', + }, + 'label_user': { + 'zh': '指令>', + 'cn': '指令>', + 'en': 'Prompt>', + 'es': 'Indicación>', + 'fr': 'Indication>', + }, + 'button_send': { + 'zh': '請稍候,模型正在編寫腳本...', + 'cn': '请稍候,模型正在编写脚本...', + 'en': 'Please wait, the model is writing the script...', + 'es': 'Por favor espere, el modelo está escribiendo el script...', + 'fr': 'Veuillez patienter, le modèle écrit le script...', + }, + 'button_submit': { + 'zh': '送出指令', + 'cn': '提交指令', + 'en': 'Submit Prompt', + 'es': 'Enviar indicación', + 'fr': 'Soumettre l\'indication', + }, + 'button_regenerate': { + 'zh': '重新生成', + 'cn': '重新生成', + 'en': 'Regenerate Response', + 'es': 'Regenerar respuesta', + 'fr': 'Régénérer la réponse', + }, + 'command': { + 'zh': '指令', + 'cn': '指令', + 'en': 'Prompt', + 'es': 'Indicación', + 'fr': 'Indication', + }, + 'command_instruction': { + 'zh': '請輸入指令', + 'cn': '请输入指令', + 'en': 'Please enter the command', + 'es': 'Por favor ingrese la indicación', + 'fr': 'Veuillez entrer l\'indication', + }, + 'button_delete_all': { + 'zh': '刪除所有對話', + 'cn': '删除所有对话', + 'en': 'Delete History', + 'es': 'Eliminar historial', + 'fr': 'Supprimer l\'historique', + }, + 'button_delete': { + 'zh': '刪除此回答', + 'cn': '删除此回答', + 'en': 'Delete This Response', + 'es': 'Eliminar esta respuesta', + 'fr': 'Supprimer cette réponse', + }, + 'creativity': { + 'zh': '創意度', + 'cn': '创意度', + 'en': 'Creativity', + 'es': 'Creatividad', + 'fr': 'Créativité', + }, + 'no_openai_key_error': { + 'zh': '錯誤: 沒有偵測到 OPENAI API Key,請在插件設定中設定 OPENAI API Key', + 'cn': '错误: 没有检测到 OPENAI API 
Key,请在插件设置中设置 OPENAI API Key', + 'en': 'Error: No OPENAI API Key detected, please set OPENAI API Key in the add-on preferences', + 'es': 'Error: No se detectó ninguna clave de API de OPENAI, configure la clave de API de OPENAI en las preferencias del complemento', + 'fr': 'Erreur: aucune clé API OPENAI détectée, veuillez définir la clé API OPENAI dans les préférences du module complémentaire', + }, + 'no_prompt_error': { + 'zh': '錯誤: 請輸入指令', + 'cn': '错误: 请输入指令', + 'en': 'Error: Please enter the prompt', + 'es': 'Error: Por favor ingrese la indicación', + 'fr': 'Erreur: Veuillez entrer l\'indication', + }, +} diff --git a/gpt_gpt.py b/gpt_gpt.py index 0d1743d..a350d4a 100644 --- a/gpt_gpt.py +++ b/gpt_gpt.py @@ -1,69 +1,6 @@ -import openai +from openai import OpenAI import re - - -def SYS_MAIN_PROMPT(language): return f""" -I want you to act as a professional 3D artist who is proficient in writing scripts in Blender, the 3D software. And you know how the relationship between the conceptual 3D model and animation ideas and the corresponding python scripts. -Here are some rules you have to heed and follow: -- Respond with your answers in markdown (```). -- Preferably import entire modules instead of bits. -- Do not perform destructive operations on the meshes. -- Do not use cap_ends. Do not do more than what is asked (setting up render settings, adding cameras, etc) -- Do not respond with anything that is not Python code. -- Do not provide explanations and comments. -- The input might be in {language}. -""" - - -EX_1_USER = """create 10 cubes in random locations from -1 to 1""" - -EX_1_ASSISTANT = """``` -import bpy -import random -bpy.ops.mesh.primitive_cube_add() - -count = 10 - -for _ in range(count): - x = random.randint(-1,1) - y = random.randint(-1,1) - z = random.randint(-1,1) - bpy.ops.mesh.primitive_cube_add(location=(x,y,z)) -```""" - -EX_2_USER = """delete all mesh objects in the scene and create a 5x5x5 ball in the scence""" - -EX_2_ASSISTANT = """``` -import bpy - -bpy.ops.object.select_all(action='DESELECT') -bpy.ops.object.select_by_type(type='MESH') -bpy.ops.object.delete() - -bpy.ops.mesh.primitive_uv_sphere_add(location=(0, 0, 0), radius=2.5) -```""" - -EX_3_USER = """create a rigid body sim with 10 1x1x1 cubes stacked as a tower taht falls down onto a 20x20 plane""" - -EX_3_ASSISTANT = """``` -import bpy - -bpy.ops.mesh.primitive_cube_add(location=(0, 0, 9)) -bpy.ops.rigidbody.object_add() -bpy.ops.transform.resize(value=(0.5, 0.5, 0.5)) - -for i in range(9): - bpy.ops.mesh.primitive_cube_add(location=(0, 0, 9+i+1)) - bpy.ops.rigidbody.object_add() - bpy.ops.transform.resize(value=(.5, .5, .5)) - -bpy.ops.mesh.primitive_plane_add(location=(0, 0, 0)) -bpy.ops.rigidbody.object_add() -bpy.context.object.rigid_body.type = 'PASSIVE' -bpy.ops.transform.resize(value=(10, 10, 1)) - -bpy.context.scene.frame_end = 200 -```""" +from .gpt_cst import SYSTEMPROMPTS def post_process(final_txt): @@ -78,29 +15,10 @@ def post_process(final_txt): def chatgpt(context): scene = context.scene - lan = int(scene.lan) - languages = ['traditional chinese', 'simplified chinese', 'english'] - models = [scene.model_0, scene.model_1, scene.model_2] - prompts = [scene.prompt_input_0, - scene.prompt_input_1, scene.prompt_input_2] - temperatures = [scene.t_0, - scene.t_1, scene.t_2] - - # sys data preparation - messages = [{"role": "system", "content": SYS_MAIN_PROMPT(languages[lan])}] - messages.append( - {"role": "system", "name": "example_user", "content": EX_1_USER}) - messages.append( - {"role": 
"system", "name": "example_assistant", "content": EX_1_ASSISTANT}) - messages.append( - {"role": "system", "name": "example_user", "content": EX_2_USER}) - messages.append( - {"role": "system", "name": "example_assistant", "content": EX_2_ASSISTANT}) - messages.append( - {"role": "system", "name": "example_user", "content": EX_3_USER}) - messages.append( - {"role": "system", "name": "example_assistant", "content": EX_3_ASSISTANT}) + # sysprompt preparation + messages = [{"role": "system", "content": system_prompt} + for system_prompt in SYSTEMPROMPTS] # add previous messages for msg in scene.history[-8:]: @@ -108,44 +26,23 @@ def chatgpt(context): messages.append( {"role": "assistant", "content": "```\n" + msg.content + "\n```"}) else: - messages.append({"role": "user", - "content": msg.content}) + messages.append({"role": "user", "content": msg.content}) + # add the current user message if messages[-1]["role"] != "user": - # add the current user message - messages.append({"role": "user", "content": "Please provide me with Blender (3D software) python code satisfying the following task: " + - prompts[lan] + ". \n. Do not provide with anything that is not Python code. Do not provide explanations and comments."}) + formatted_message = f"Please provide me with Blender (3D software) python code satisfying the following task: {scene.prompt_input}. \n. Do not provide with anything that is not Python code. Do not provide explanations and comments." + messages.append({"role": "user", "content": formatted_message}) - response = openai.ChatCompletion.create( - model=models[lan], + # send message to GPT + client = OpenAI() + response = client.ChatCompletion.create( + model=scene.model, messages=messages, - temperature=temperatures[lan], - # stream=True, - max_tokens=2000, + temperature=scene.creativity, ) try: - # events = [] - # final_txt = '' - - # # becuase stream = true so use delta to concatentate - # for e in response: - # if len(e['choices'][0]['delta']) == 0: - # continue - - # if 'role' in e['choices'][0]['delta']: - # continue - - # events.append(e) - # event_text = e['choices'][0]['delta']['content'] - # final_txt += event_text - # print(final_txt, flush=True, end='\r') - - # return post_process(final_txt) - final_txt = response['choices'][0]['message']['content'] - return post_process(final_txt) - except IndexError: - return None + return '' diff --git a/gpt_opt.py b/gpt_opt.py index b188288..76226f1 100644 --- a/gpt_opt.py +++ b/gpt_opt.py @@ -1,9 +1,10 @@ import bpy - from bpy.types import Operator + import openai from .gpt_gpt import chatgpt +from .gpt_cst import UI class BLENDERGPT_OT_DEL_MSG(Operator): @@ -12,7 +13,7 @@ class BLENDERGPT_OT_DEL_MSG(Operator): bl_description = "delete message" bl_options = {"REGISTER", "UNDO"} - msg_idx: bpy.props.IntProperty(name="訊息索引", default=0) + msg_idx: bpy.props.IntProperty(name="msg_idx", default=0) def execute(self, context): scene = context.scene @@ -45,14 +46,7 @@ class BLENDERGPT_OT_GPT_CODE(Operator): def execute(self, context): - # text area - if int(context.scene.lan) == 0: - txt_name = '指令腳本.py' - elif int(context.scene.lan) == 1: - txt_name = '指令脚本.py' - else: - txt_name = 'prompt_script.py' - + txt_name = 'prompt_script.py' txt = bpy.data.texts.get(txt_name) if txt is None: @@ -93,37 +87,21 @@ class BLENDERGPT_OT_SEND_MSG(Operator): def execute(self, context): scene = context.scene - # TODO: connect to GPT + # openai api key prf = context.preferences openai.api_key = prf.addons["blender-gpt"].preferences.openai_key + lan = 
prf.addons["blender-gpt"].preferences.language if not openai.api_key: - if int(context.scene.lan) == 0: - self.report( - {'ERROR'}, "錯誤: 沒有偵測到 OPENAI API Key,請在插件設定中設定 OPENAI API Key") - elif int(context.scene.lan) == 1: - self.report( - {'ERROR'}, "错误: 没有检测到 OPENAI API Key,请在插件设置中设置 OPENAI API Key") - else: - self.report( - {'ERROR'}, "Error: No OPENAI API Key detected, please set OPENAI API Key in the add-on preferences") + self.report({'ERROR'}, UI['error_no_api_key'][lan]) return {'CANCELLED'} scene.on_finish = True # bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1) - lan = int(context.scene.lan) - prompts = [scene.prompt_input_0, - scene.prompt_input_1, scene.prompt_input_2] - if len(scene.history) == 0 or scene.history[-1].type == 'GPT': - if prompts[lan] == "": - if lan == 0: - self.report({'ERROR'}, f"錯誤: 請輸入指令") - elif lan == 1: - self.report({'ERROR'}, f"错误: 请输入指令") - else: - self.report({'ERROR'}, f"Error: Please enter the prompt") + if scene.prompt_input == "": + self.report({'ERROR'}, UI['error_no_prompt'][lan]) scene.on_finish = False return {'CANCELLED'} @@ -137,10 +115,8 @@ class BLENDERGPT_OT_SEND_MSG(Operator): if len(scene.history) == 0 or scene.history[-1].type == 'GPT': msg = scene.history.add() msg.type = 'USER' - msg.content = prompts[lan] - scene.prompt_input_0 = "" - scene.prompt_input_1 = "" - scene.prompt_input_2 = "" + msg.content = scene.prompt_input + scene.prompt_input = "" if code_exe_blender: msg = scene.history.add() diff --git a/gpt_pnl.py b/gpt_pnl.py index b6c89a9..ba3a41a 100644 --- a/gpt_pnl.py +++ b/gpt_pnl.py @@ -1,29 +1,7 @@ import bpy from bpy.types import Panel - - -UI_lan = { - 'language': ['繁體中文', '简体中文', 'English'], - 'label_language': ['語言', '语言', 'Language'], - 'label_model': ['Chat-GPT 模型', 'Chat-GPT 模型', 'Chat-GPT Model'], - 'label_model_description': ['請選擇欲使用的Chat-GPT模型', '请选择要使用的Chat-GPT模型', 'Please select the Chat-GPT model'], - 'model_options': { - 'gpt3.5': ['GPT-3.5 (便宜但較容易出錯)', 'GPT-3.5 (便宜但較容易出错)', 'GPT-3.5 (Affordable but less accurate)'], - 'gpt4': ['GPT-4 (昂貴但較詳細準確)', 'GPT-4 (昂贵但较详细准确)', 'GPT-4 (Expensive but more accurate)'], - }, - 'label_history': ['對話歷史紀錄', '对话历史纪录', 'Chat History'], - 'label_show_code': ['顯示程式碼', '显示代码', 'Show Code'], - 'label_user': ['指令>', '指令>', 'Prompt>'], - 'button_send': ['請稍候,模型正在編寫腳本...', '请稍候,模型正在编写脚本...', 'Please wait, the model is writing the script...'], - 'button_submit': ['送出指令', '提交指令', 'Submit Prompt'], - 'button_regenerate': ['重新生成', '重新生成', 'Regenerate Response'], - 'command': ['指令', '指令', 'Prompt'], - 'command_instruction': ['請輸入指令', '请输入指令', 'Please enter the command'], - 'button_delete_all': ['刪除所有對話', '删除所有对话', 'Delete History'], - 'button_delete': ['刪除此回答', '删除此回答', 'Delete This Response'], - 'creativity': ['創意度', '创意度', 'Creativity'], -} +from .gpt_cst import UI class BLENDERGPT_PT_PANEL(Panel): @@ -34,43 +12,33 @@ class BLENDERGPT_PT_PANEL(Panel): bl_category = 'Blender GPT' def draw(self, context): - layout = self.layout + addon_prefs = context.preferences.addons['blender-gpt'].preferences + lan = addon_prefs.language - lan_idx = int(context.scene.lan) + layout = self.layout column = layout.column(align=True) - # language usage - row = column.row(align=True) - row.label(text=UI_lan['label_language'][lan_idx]) - row.prop(context.scene, "lan", text="") + # language youre using + column.label(text=UI['label_language'][lan]) column.separator() # model of chat gpt - column.label(text=UI_lan['label_model'][lan_idx]) - if lan_idx == 0: - column.prop(context.scene, "model_0", 
text="") - elif lan_idx == 1: - column.prop(context.scene, "model_1", text="") - else: - column.prop(context.scene, "model_2", text="") + column.label(text=UI['label_model'][lan]) + column.prop(context.scene, "model", text="") column.separator() # creativity - column.label(text=UI_lan['creativity'][lan_idx]) - if lan_idx == 0: - column.prop(context.scene, "t_0", text="") - elif lan_idx == 1: - column.prop(context.scene, "t_1", text="") - else: - column.prop(context.scene, "t_2", text="") + column.label(text=UI['creativity'][lan]) + column.prop(context.scene, "creativity", text="") column.separator() + # history of chat if len(context.scene.history) > 0: - column.label(text=UI_lan['label_history'][lan_idx]) + column.label(text=UI['label_history'][lan]) box = column.box() for index, message in enumerate(context.scene.history): if message.type == 'GPT': @@ -89,7 +57,7 @@ class BLENDERGPT_PT_PANEL(Panel): else: row = box.row() row.label( - text=f"{UI_lan['label_user'][lan_idx]}{message.content}") + text=f"{UI['label_user'][lan]}{message.content}") if index == len(context.scene.history) - 2: del_msg_op = row.operator( @@ -100,60 +68,57 @@ class BLENDERGPT_PT_PANEL(Panel): # input of chat if len(context.scene.history) == 0 or (len(context.scene.history) > 0 and context.scene.history[-1].type != 'USER'): - column.label(text=UI_lan['command'][lan_idx]) - if lan_idx == 0: - column.prop(context.scene, "prompt_input_0", text="") - elif lan_idx == 1: - column.prop(context.scene, "prompt_input_1", text="") - else: - column.prop(context.scene, "prompt_input_2", text="") + column.label(text=UI['command'][lan]) + column.prop(context.scene, "prompt_input", text="") # send message if len(context.scene.history) > 0 and context.scene.history[-1].type == 'USER': - button_label = UI_lan['button_send'][lan_idx] if context.scene.on_finish else UI_lan['button_regenerate'][lan_idx] + button_label = UI['button_send'][lan] if context.scene.on_finish else UI['button_regenerate'][lan] else: - button_label = UI_lan['button_send'][lan_idx] if context.scene.on_finish else UI_lan['button_submit'][lan_idx] + button_label = UI['button_send'][lan] if context.scene.on_finish else UI['button_submit'][lan] column.operator("gpt.send_msg", text=button_label, icon="PLAY") column.separator() column.operator("gpt.del_all_msg", - text=UI_lan['button_delete_all'][lan_idx], icon="TRASH") + text=UI['button_delete_all'][lan], icon="TRASH") -def model_props_generator(idx): +def model_props_generator(): + addon_prefs = bpy.context.preferences.addons['blender-gpt'].preferences + lan = addon_prefs.language + return bpy.props.EnumProperty( - name=UI_lan['label_model'][idx], - description=UI_lan['label_model_description'][idx], + name=UI['label_model'][lan], + description=UI['label_model_description'][lan], items=[ - ("gpt-3.5-turbo", UI_lan['model_options']['gpt3.5'] - [idx], UI_lan['model_options']['gpt3.5'][idx]), - ("gpt-4", UI_lan['model_options']['gpt4'] - [idx], UI_lan['model_options']['gpt4'][idx]), + ("gpt-3.5-turbo", UI['model_options']['gpt3.5'] + [lan], UI['model_options']['gpt3.5'][lan]), + ("gpt-4", UI['model_options']['gpt4'] + [lan], UI['model_options']['gpt4'][lan]), ], default="gpt-3.5-turbo", ) -def prompt_input_generator(idx): +def prompt_input_generator(): + addon_prefs = bpy.context.preferences.addons['blender-gpt'].preferences + lan = addon_prefs.language + return bpy.props.StringProperty( - name=UI_lan['command'][idx], - description=UI_lan['command_instruction'][idx], + name=UI['command'][lan], + 
description=UI['command_instruction'][lan], default="", ) -bpy.props.StringProperty( - name="a", - description="a", - default="", -) +def temperature_generator(): + addon_prefs = bpy.context.preferences.addons['blender-gpt'].preferences + lan = addon_prefs.language - -def temperature_generator(idx): return bpy.props.FloatProperty( - name=UI_lan['creativity'][idx], - description=UI_lan['creativity'][idx], + name=UI['creativity'][lan], + description=UI['creativity'][lan], default=0, min=0, max=1, @@ -161,33 +126,12 @@ def temperature_generator(idx): def props_initialization(): - bpy.types.Scene.history = bpy.props.CollectionProperty( type=bpy.types.PropertyGroup) - bpy.types.Scene.lan = bpy.props.EnumProperty( - name="語言", - description="請選擇語言", - items=[ - ("0", "繁體中文", "繁體中文"), - ("1", "简体中文", "简体中文"), - ("2", "English", "英文"), - ], - default="0", - ) - - bpy.types.Scene.model_0 = model_props_generator(0) - bpy.types.Scene.model_1 = model_props_generator(1) - bpy.types.Scene.model_2 = model_props_generator(2) - - bpy.types.Scene.prompt_input_0 = prompt_input_generator(0) - bpy.types.Scene.prompt_input_1 = prompt_input_generator(1) - bpy.types.Scene.prompt_input_2 = prompt_input_generator(2) - - bpy.types.Scene.t_0 = temperature_generator(0) - bpy.types.Scene.t_1 = temperature_generator(1) - bpy.types.Scene.t_2 = temperature_generator(2) - + bpy.types.Scene.model = model_props_generator() + bpy.types.Scene.prompt_input = prompt_input_generator() + bpy.types.Scene.creativity = temperature_generator() bpy.types.Scene.on_finish = bpy.props.BoolProperty(default=False) bpy.types.PropertyGroup.type = bpy.props.StringProperty() @@ -196,14 +140,7 @@ def props_initialization(): def props_clear(): del bpy.types.Scene.history - del bpy.types.Scene.lan - del bpy.types.Scene.model_0 - del bpy.types.Scene.model_1 - del bpy.types.Scene.model_2 - del bpy.types.Scene.prompt_input_0 - del bpy.types.Scene.prompt_input_1 - del bpy.types.Scene.prompt_input_2 - del bpy.types.Scene.t_0 - del bpy.types.Scene.t_1 - del bpy.types.Scene.t_2 + del bpy.types.Scene.model + del bpy.types.Scene.prompt_input + del bpy.types.Scene.creativity del bpy.types.Scene.on_finish diff --git a/gpt_prf.py b/gpt_prf.py index 0442597..1ff8e5e 100644 --- a/gpt_prf.py +++ b/gpt_prf.py @@ -1,6 +1,7 @@ +import bpy from bpy import props from bpy.types import AddonPreferences - +from .gpt_cst import UI class BLENDERGPT_AddonPreferences(AddonPreferences): bl_idname = "blender-gpt" @@ -12,6 +13,69 @@ class BLENDERGPT_AddonPreferences(AddonPreferences): subtype="PASSWORD", ) + languages = [ + ('en', "English", ""), + ('es', "Español", ""), + ('zh', "繁體中文", ""), + ('cn', "简体中文", ""), + ('fr', "Français", ""), + ] + language: props.EnumProperty( + name="Language", + items=languages, + default='en', + description="Select your preferred language" + update=update_language, + ) + def draw(self, context): layout = self.layout layout.prop(self, "openai_key") + layout.prop(self, "language", text="Language") + + def update_language(self, context): + prefs = context.preferences.addons['blender-gpt'].preferences + lan = prefs.language + + # model + current_model = getattr(context.scene, "model", "gpt-3.5-turbo") + + bpy.types.Scene.model = bpy.props.EnumProperty( + name=UI['label_model'][lan], + description=UI['label_model_description'][lan], + items=[ + ("gpt-3.5-turbo", UI['model_options']['gpt3.5'] + [lan], UI['model_options']['gpt3.5'][lan]), + ("gpt-4", UI['model_options']['gpt4'] + [lan], UI['model_options']['gpt4'][lan]), + ], + 
default=current_model, + ) + setattr(context.scene, "model", current_model) + + # prompt_input + current_prompt_input = getattr(context.scene, "prompt_input", "") + + bpy.types.Scene.prompt_input = bpy.props.StringProperty( + name=UI['command'][lan], + description=UI['command_instruction'][lan], + default=current_prompt_input, + ) + + setattr(context.scene, "prompt_input", current_prompt_input) + + # creativity + current_creativity = getattr(context.scene, "creativity", 0) + + bpy.types.Scene.creativity = bpy.props.FloatProperty( + name=UI['creativity'][lan], + description=UI['creativity'][lan], + default=current_creativity, + min=0, + max=1, + ) + + setattr(context.scene, "creativity", current_creativity) + + +
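
Note on gpt_gpt.py: the import switches to the v1 SDK (`from openai import OpenAI`), but the request is still issued through `client.ChatCompletion.create(...)`, which does not exist on the v1 client; in openai>=1.0 the chat endpoint is `client.chat.completions.create(...)`, and the response is a typed object rather than a dict, so `response['choices'][0]['message']['content']`-style indexing (if kept) would also fail. In addition, `OpenAI()` does not pick up the legacy module-level `openai.api_key` that gpt_opt.py sets; it reads `OPENAI_API_KEY` from the environment or an explicit `api_key=` argument. A minimal sketch under those assumptions; `request_script` is an illustrative helper, not a function in the add-on:

```python
from openai import OpenAI


def request_script(api_key, model, messages, temperature):
    # OpenAI() takes the key explicitly (or via OPENAI_API_KEY); it does not
    # see the legacy openai.api_key assignment made in gpt_opt.py.
    client = OpenAI(api_key=api_key)
    response = client.chat.completions.create(  # v1 spelling of the chat endpoint
        model=model,
        messages=messages,
        temperature=temperature,
    )
    # v1 responses are typed objects, not dicts
    return response.choices[0].message.content
```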
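Note on gpt_opt.py: the error reports look up `UI['error_no_api_key'][lan]` and `UI['error_no_prompt'][lan]`, but the dictionary added in gpt_cst.py defines these messages under `'no_openai_key_error'` and `'no_prompt_error'`, so both error paths would raise KeyError instead of showing the localized message. A minimal sketch of the two lookups aligned with the keys gpt_cst.py actually defines (abbreviated excerpt of `BLENDERGPT_OT_SEND_MSG.execute`, with `openai`, `lan`, and `scene` set as in the patch):

```python
from .gpt_cst import UI

# inside BLENDERGPT_OT_SEND_MSG.execute():
if not openai.api_key:
    self.report({'ERROR'}, UI['no_openai_key_error'][lan])  # key defined in gpt_cst.py
    return {'CANCELLED'}

if len(scene.history) == 0 or scene.history[-1].type == 'GPT':
    if scene.prompt_input == "":
        self.report({'ERROR'}, UI['no_prompt_error'][lan])  # key defined in gpt_cst.py
        scene.on_finish = False
        return {'CANCELLED'}
```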
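Note on gpt_prf.py: in the new `language` property there is no comma between `description="Select your preferred language"` and `update=update_language`, which is a SyntaxError, and `update_language` is referenced in the class body before it is defined, which raises NameError once the comma is added. A minimal sketch of one way to wire it, assuming the callback is moved to module level above the class (Blender calls property `update` callbacks with `(self, context)`); the callback body is elided here:

```python
from bpy import props
from bpy.types import AddonPreferences


def update_language(self, context):
    # self is the BLENDERGPT_AddonPreferences instance; rebuild the localized
    # Scene properties here, as the patch's update_language body does.
    pass


class BLENDERGPT_AddonPreferences(AddonPreferences):
    bl_idname = "blender-gpt"

    # openai_key: props.StringProperty(...)  as in the patch

    language: props.EnumProperty(
        name="Language",
        items=[
            ('en', "English", ""),
            ('es', "Español", ""),
            ('zh', "繁體中文", ""),
            ('cn', "简体中文", ""),
            ('fr', "Français", ""),
        ],
        default='en',
        description="Select your preferred language",  # comma required before update=
        update=update_language,
    )
```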