diff --git a/crazy_functions/CodeInterpreter.py b/crazy_functions/CodeInterpreter.py
new file mode 100644
index 0000000..73e142d
--- /dev/null
+++ b/crazy_functions/CodeInterpreter.py
@@ -0,0 +1,213 @@
+from collections.abc import Callable, Iterable, Mapping
+from typing import Any
+from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, promote_file_to_downloadzone, clear_file_downloadzone
+from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from .crazy_utils import input_clipping, try_install_deps
+from multiprocessing import Process, Pipe
+import os
+
# Prompt template sent to the LLM in step 2 of gpt_interact_multi_step: the
# model must rewrite its function inside this exact scaffold (a class named
# `TerminalFunction` exposing `run(path)`) so the generated code can later be
# imported and executed via make_module()/init_module_instance().
# NOTE(review): 'templete' is a typo for 'template'; the name is kept as-is
# because other code in this file refers to it by this spelling.
templete = """
```python
import ... # Put dependencies here, e.g. import numpy as np

class TerminalFunction(object): # Do not change the name of the class, The name of the class must be `TerminalFunction`

    def run(self, path): # The name of the function must be `run`, it takes only a positional argument.
        # rewrite the function you have just written here
        ...
        return generated_file_path
```
"""
+
def inspect_dependency(chatbot, history):
    """Placeholder dependency check: refresh the UI and report success.

    No actual dependency inspection is performed; the generator yields the
    UI-refresh steps from update_ui and always finishes with True (delivered
    as the generator's StopIteration value).
    """
    yield from update_ui(chatbot=chatbot, history=history) # refresh the interface
    return True
+
def get_code_block(reply):
    """Extract the python code block from an LLM reply.

    Finds all fenced ``` blocks in `reply`. If there is exactly one, it is
    returned; otherwise the first block containing `class TerminalFunction`
    wins. The leading `python` language tag after the opening fence is
    removed from the returned text.

    Raises:
        RuntimeError: if no suitable code block is present in `reply`.
    """
    import re

    def _drop_lang_tag(block):
        # Remove only the `python` language tag that follows the opening
        # fence. The previous `str.strip('python')` stripped any run of the
        # characters p/y/t/h/o/n from BOTH ends of the block, which could
        # silently eat real code (e.g. a block starting with `no = 1`).
        return re.sub(r'\A[ \t]*python(?=\s)', '', block)

    pattern = r"```([\s\S]*?)```"  # matches fenced code blocks, non-greedy
    matches = re.findall(pattern, reply)
    if len(matches) == 1:
        return _drop_lang_tag(matches[0])
    for match in matches:
        if 'class TerminalFunction' in match:
            return _drop_lang_tag(match)
    raise RuntimeError("GPT is not generating proper code.")
+
def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history):
    """Drive the LLM through two chat turns to obtain runnable analysis code.

    Step 1 asks for a single python function that solves the user request
    `txt` for a file of type `file_type`; step 2 asks the model to rewrite
    that function into the `templete` scaffold (class TerminalFunction with
    a `run(path)` method). Both turns stream UI updates via yield.

    Returns (as the generator's final value) the tuple
    (code_to_return, installation_advance, txt, file_type, llm_kwargs,
    chatbot, history); `installation_advance` is currently always the empty
    string because the dependency-advice step below is commented out.
    """
    # Compose the first prompt from the user request.
    prompt_compose = [
        f'Your job:\n'
        f'1. write a single Python function, which takes a path of a `{file_type}` file as the only argument and returns a `string` containing the result of analysis or the path of generated files. \n',
        f"2. You should write this function to perform following task: " + txt + "\n",
        f"3. Wrap the output python function with markdown codeblock."
    ]
    i_say = "".join(prompt_compose)
    demo = []

    # Step 1: request the raw function (empty history so the turn is standalone).
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=i_say, inputs_show_user=i_say,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo,
        sys_prompt= r"You are a programmer."
    )
    history.extend([i_say, gpt_say])
    yield from update_ui(chatbot=chatbot, history=history) # refresh the interface

    # Step 2: ask the model to rewrite its function into the executable template.
    prompt_compose = [
        "If previous stage is successful, rewrite the function you have just written to satisfy following templete: \n",
        templete
    ]
    i_say = "".join(prompt_compose); inputs_show_user = "If previous stage is successful, rewrite the function you have just written to satisfy executable templete. "
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=i_say, inputs_show_user=inputs_show_user,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
        sys_prompt= r"You are a programmer."
    )
    code_to_return = gpt_say
    history.extend([i_say, gpt_say])
    yield from update_ui(chatbot=chatbot, history=history) # refresh the interface

    # # Step 3 (disabled): ask the model for dependency-installation advice.
    # i_say = "Please list to packages to install to run the code above. Then show me how to use `try_install_deps` function to install them."
    # i_say += 'For instance. `try_install_deps(["opencv-python", "scipy", "numpy"])`'
    # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive(
    #     inputs=i_say, inputs_show_user=inputs_show_user,
    #     llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
    #     sys_prompt= r"You are a programmer."
    # )
    # # # Step 3 variant (also disabled): pip-based installation advice.
    # i_say = "Show me how to use `pip` to install packages to run the code above. "
    # i_say += 'For instance. `pip install -r opencv-python scipy numpy`'
    # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive(
    #     inputs=i_say, inputs_show_user=i_say,
    #     llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
    #     sys_prompt= r"You are a programmer."
    # )
    installation_advance = ""

    return code_to_return, installation_advance, txt, file_type, llm_kwargs, chatbot, history
+
def make_module(code):
    """Persist generated code as an importable module under gpt_log/.

    Writes `code` to gpt_log/gpt_fn_<timestamp>.py and returns a locator
    string of the form "gpt_log.<module>-><ClassName>" which
    init_module_instance() can resolve back into an instance.

    Raises:
        RuntimeError: if `code` contains no class definition.
    """
    import os
    import re
    # Ensure the target package directory exists; open() below would
    # otherwise fail with FileNotFoundError on a fresh checkout.
    os.makedirs('gpt_log', exist_ok=True)
    module_file = 'gpt_fn_' + gen_time_str().replace('-','_')
    with open(f'gpt_log/{module_file}.py', 'w', encoding='utf8') as f:
        f.write(code)

    def get_class_name(class_string):
        # Extract the first class name; accept both `class C(Base):` and
        # `class C:` (the old pattern required parentheses and crashed with
        # AttributeError when the model omitted the base class).
        match = re.search(r'class (\w+)[\(:]', class_string)
        if match is None:
            raise RuntimeError("No class definition found in the generated code.")
        return match.group(1)

    class_name = get_class_name(code)
    return f"gpt_log.{module_file}->{class_name}"
+
def init_module_instance(module):
    """Resolve a 'package.module->ClassName' locator string (as produced by
    make_module) and return a freshly constructed instance of that class."""
    from importlib import import_module
    module_path, class_name = module.split('->')
    return getattr(import_module(module_path), class_name)()
+
+def for_immediate_show_off_when_possible(file_type, fp, chatbot):
+ if file_type in ['png', 'jpg']:
+ image_path = os.path.abspath(fp)
+ chatbot.append(['这是一张图片, 展示如下:',
+            f'本地文件地址: <br/>`{image_path}`<br/>'+
+ f'本地文件预览: