def chat():
    """Minimal REPL loop: read user input, let the model call tools, and print the reply."""
    messages_history = []
    while True:
        user_input = input("You: ")
        if user_input.lower() in ["exit", "quit", "q"]:
            break
        messages_history.append({"role": "user", "content": user_input})

        # Ask the model for a reply, advertising the available tools.
        response = client.chat.completions.create(
            model="Qwen/Qwen2.5-7B-Instruct-Turbo",
            messages=messages_history,
            tools=tools,
        )
        tool_calls = response.choices[0].message.tool_calls
        if tool_calls:
            # Keep the assistant turn that requested the tools so the "tool" messages
            # below can reference its tool_call_ids (some clients may require
            # response.choices[0].message.model_dump() here instead of the raw object).
            messages_history.append(response.choices[0].message)
            for tool_call in tool_calls:
                function_name = tool_call.function.name
                function_args = json.loads(tool_call.function.arguments)
                if function_name == "read_file":
                    print("Tool call: read_file")
                    function_response = read_file(
                        path=function_args.get("path")
                    )
                    messages_history.append(
                        {
                            "tool_call_id": tool_call.id,
                            "role": "tool",
                            "name": function_name,
                            "content": function_response,
                        }
                    )
                elif function_name == "list_files":
                    print("Tool call: list_files")
                    function_response = list_files(
                        path=function_args.get("path", ".")
                    )
                    messages_history.append(
                        {
                            "tool_call_id": tool_call.id,
                            "role": "tool",
                            "name": function_name,
                            "content": function_response,
                        }
                    )
                elif function_name == "edit_file":
                    print("Tool call: edit_file")
                    function_response = edit_file(
                        path=function_args.get("path"),
                        old_str=function_args.get("old_str"),
                        new_str=function_args.get("new_str"),
                    )
                    messages_history.append(
                        {
                            "tool_call_id": tool_call.id,
                            "role": "tool",
                            "name": function_name,
                            "content": function_response,
                        }
                    )
            # With the tool results in the history, ask the model again so it can
            # turn them into a natural-language answer.
            function_enriched_response = client.chat.completions.create(
                model="Qwen/Qwen2.5-7B-Instruct-Turbo",
                messages=messages_history,
            )
            messages_history.append(
                {
                    "role": "assistant",
                    "content": function_enriched_response.choices[0].message.content,
                }
            )
            print(f"LLM: {function_enriched_response.choices[0].message.content}")
        else:
            # No tool call was requested; just record and print the reply.
            messages_history.append(
                {"role": "assistant", "content": response.choices[0].message.content}
            )
            print(f"LLM: {response.choices[0].message.content}")
# Start the chat loop
chat()