// the list of tools is model-agnostic
[{"type":"function","function":{"name":"get_delivery_date","description":"Get the delivery date for a customer's order","parameters":{"type":"object","properties":{"order_id":{"type":"string"}},"required":["order_id"]}}}]
This list is injected into the model's system prompt according to the model's chat template. For Qwen2.5-Instruct, it looks like this:
<|im_start|>system
You are Qwen, created by Alibaba Cloud. You are a helpful assistant.
# Tools
You may call one or more functions to assist with the user query.
You are provided with function signatures within <tools></tools> XML tags:
<tools>
{"type":"function","function":{"name":"get_delivery_date","description":"Get the delivery date for a customer's order","parameters":{"type":"object","properties":{"order_id":{"type":"string"}},"required":["order_id"]}}}
</tools>
For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
<tool_call>
{"name": <function-name>,"arguments": <args-json-object>}
</tool_call><|im_end|>
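You can reproduce this rendering yourself with Hugging Face transformers, which accepts a tools list in apply_chat_template (a minimal sketch, assuming a recent transformers version and local access to the Qwen2.5 tokenizer; the exact text depends on the tokenizer version):
from transformers import AutoTokenizer

# Load the tokenizer that carries the Qwen2.5 chat template
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct")

tools = [{"type": "function", "function": {"name": "get_delivery_date", "description": "Get the delivery date for a customer's order", "parameters": {"type": "object", "properties": {"order_id": {"type": "string"}}, "required": ["order_id"]}}}]
messages = [{"role": "user", "content": "When will order 123 be delivered?"}]

# Render (but don't tokenize) the full prompt, including the injected <tools> block
print(tokenizer.apply_chat_template(messages, tools=tools, tokenize=False, add_generation_prompt=True))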
# pseudocode, see examples for copy-paste snippets
if response.has_tool_calls:
    for each tool_call:
        # Extract function name & args
        function_to_call = tool_call.name  # e.g. "get_delivery_date"
        args = tool_call.arguments         # e.g. {"order_id": "123"}

        # Execute the function
        result = execute_function(function_to_call, args)

        # Add result to conversation
        add_to_messages([
            ASSISTANT_TOOL_CALL_MESSAGE,  # The request to use the tool
            TOOL_RESULT_MESSAGE           # The tool's response
        ])
else:
    # Normal response without tools
    add_to_messages(response.content)
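Concretely, against LM Studio's OpenAI-compatible endpoint, the same flow looks roughly like this in Python (a sketch only: client, model, messages, and tools are assumed to be defined as in the full examples below, and execute_function stands in for your own dispatcher):
import json

response = client.chat.completions.create(model=model, messages=messages, tools=tools)
message = response.choices[0].message

if message.tool_calls:
    # Record the assistant's tool call request in the history
    messages.append({
        "role": "assistant",
        "tool_calls": [
            {"id": tc.id, "type": tc.type, "function": tc.function}
            for tc in message.tool_calls
        ],
    })
    # Execute each requested function and append its result
    for tc in message.tool_calls:
        args = json.loads(tc.function.arguments)  # e.g. {"order_id": "123"}
        result = execute_function(tc.function.name, args)  # hypothetical dispatcher
        messages.append({
            "role": "tool",
            "content": json.dumps(result),
            "tool_call_id": tc.id,
        })
else:
    # Normal response without tools
    messages.append({"role": "assistant", "content": message.content})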
The LLM is then prompted again with the updated messages array, but without access to the tools. This is because:
- The LLM already has the tool's result in the conversation history.
- We want the LLM to give the user a final reply, not call more tools.
# Example messages
messages = [
    {"role": "user", "content": "When will order 123 be delivered?"},
    {"role": "assistant", "tool_calls": [{
        "id": "call_123",  # illustrative id
        "type": "function",
        "function": {
            "name": "get_delivery_date",
            "arguments": "{\"order_id\": \"123\"}",
        },
    }]},
    {"role": "tool", "content": "2024-03-15", "tool_call_id": "call_123"},
]
response = client.chat.completions.create(
model="lmstudio-community/qwen2.5-7b-instruct",
messages=messages
)
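Since tools is omitted on this second call, the model can only answer in plain text, and the final reply comes back in the usual place:
print(response.choices[0].message.content)
# e.g. "Your order 123 will be delivered on 2024-03-15." (exact wording will vary)
The log below, captured with lms log stream, shows the kind of prompt LM Studio builds for a model without a native tool-use template (here gemma-2-2b-it):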
-> % lms log stream
Streaming logs from LM Studio
timestamp: 11/13/2024, 9:35:15 AM
type: llm.prediction.input
modelIdentifier: gemma-2-2b-it
modelPath: lmstudio-community/gemma-2-2b-it-GGUF/gemma-2-2b-it-Q4_K_M.gguf
input: "<start_of_turn>system
You are a tool-calling AI. You can request calls to available tools with this EXACT format:
[TOOL_REQUEST]{"name": "tool_name", "arguments": {"param1": "value1"}}[END_TOOL_REQUEST]
AVAILABLE TOOLS:
{
"type": "toolArray",
"tools": [
{
"type": "function",
"function": {
"name": "get_delivery_date",
"description": "Get the delivery date for a customer's order",
"parameters": {
"type": "object",
"properties": {
"order_id": {
"type": "string"
}
},
"required": [
"order_id"
]
}
}
}
]
}
RULES:
- Only use tools from AVAILABLE TOOLS
- Include all required arguments
- Use one [TOOL_REQUEST] block per tool
- Never use [TOOL_RESULT]
- If you decide to call one or more tools, there should be no other text in your message
Examples:
"Check Paris weather"
[TOOL_REQUEST]{"name": "get_weather", "arguments": {"location": "Paris"}}[END_TOOL_REQUEST]
"Send email to John about meeting and open browser"
[TOOL_REQUEST]{"name": "send_email", "arguments": {"to": "John", "subject": "meeting"}}[END_TOOL_REQUEST]
[TOOL_REQUEST]{"name": "open_browser", "arguments": {}}[END_TOOL_REQUEST]
Respond conversationally if no matching tools exist.<end_of_turn>
<start_of_turn>user
Get me delivery date for order 123<end_of_turn>
<start_of_turn>model
"
from openai import OpenAI
import json

# Connect to LM Studio
client = OpenAI(base_url="http://127.0.0.1:1234/v1", api_key="lm-studio")

# Define a simple function
def say_hello(name: str) -> str:
print(f"Hello, {name}!")
# Tell the AI about our function
tools = [
{
"type": "function",
"function": {
"name": "say_hello",
"description": "Says hello to someone",
"parameters": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "The person's name"
}
},
"required": ["name"]
}
}
}
]
# Ask the AI to use our function
response = client.chat.completions.create(
model="lmstudio-community/qwen2.5-7b-instruct",
messages=[{"role": "user", "content": "Can you say hello to Bob the Builder?"}],
tools=tools
)
# Get the name the AI wants to use a tool to say hello to
# (Assumes the AI has requested a tool call and that tool call is say_hello)
tool_call = response.choices[0].message.tool_calls[0]
name = json.loads(tool_call.function.arguments)["name"]
# Actually call the say_hello function
say_hello(name) # Prints: Hello, Bob the Builder!
Running this script from the console should produce output like:
-> % python single-turn-example.py
Hello, Bob the Builder!
Try changing the name in
messages=[{"role": "user", "content": "Can you say hello to Bob the Builder?"}]
to see the model call the say_hello function with a different name.
Multi-turn example
Now for a slightly more complex example.
In this example, we will:
- give the model access to a get_delivery_date function
- feed the result of calling that function back to the model, so that it can fulfill the user's request in plain text
multi-turn-example.py
from datetime import datetime, timedelta
import json
import random
from openai import OpenAI
# Point to the local server
client = OpenAI(base_url="https://127.0.0.1:1234/v1", api_key="lm-studio")
model = "lmstudio-community/qwen2.5-7b-instruct"defget_delivery_date(order_id: str) -> datetime:
# Generate a random delivery date between today and 14 days from now# in a real-world scenario, this function would query a database or API
today = datetime.now()
random_days = random.randint(1, 14)
delivery_date = today + timedelta(days=random_days)
print(
f"\nget_delivery_date function returns delivery date:\n\n{delivery_date}",
flush=True,
)
return delivery_date
tools = [
{
"type": "function",
"function": {
"name": "get_delivery_date",
"description": "Get the delivery date for a customer's order. Call this whenever you need to know the delivery date, for example when a customer asks 'Where is my package'",
"parameters": {
"type": "object",
"properties": {
"order_id": {
"type": "string",
"description": "The customer's order ID.",
},
},
"required": ["order_id"],
"additionalProperties": False,
},
},
}
]
messages = [
{
"role": "system",
"content": "You are a helpful customer support assistant. Use the supplied tools to assist the user.",
},
{
"role": "user",
"content": "Give me the delivery date and time for order number 1017",
},
]
# LM Studio
response = client.chat.completions.create(
model=model,
messages=messages,
tools=tools,
)
print("\nModel response requesting tool call:\n", flush=True)
print(response, flush=True)
# Extract the arguments for get_delivery_date
# Note this code assumes we have already determined that the model generated a function call.
tool_call = response.choices[0].message.tool_calls[0]
arguments = json.loads(tool_call.function.arguments)
order_id = arguments.get("order_id")
# Call the get_delivery_date function with the extracted order_id
delivery_date = get_delivery_date(order_id)
assistant_tool_call_request_message = {
"role": "assistant",
"tool_calls": [
{
"id": response.choices[0].message.tool_calls[0].id,
"type": response.choices[0].message.tool_calls[0].type,
"function": response.choices[0].message.tool_calls[0].function,
}
],
}
# Create a message containing the result of the function call
function_call_result_message = {
"role": "tool",
"content": json.dumps(
{
"order_id": order_id,
"delivery_date": delivery_date.strftime("%Y-%m-%d %H:%M:%S"),
}
),
"tool_call_id": response.choices[0].message.tool_calls[0].id,
}
# Prepare the chat completion call payload
completion_messages_payload = [
messages[0],
messages[1],
assistant_tool_call_request_message,
function_call_result_message,
]
# Call the OpenAI API's chat completions endpoint to send the tool call result back to the model
# LM Studio
response = client.chat.completions.create(
model=model,
messages=completion_messages_payload,
)
print("\nFinal model response with knowledge of the tool call result:\n", flush=True)
print(response.choices[0].message.content, flush=True)
Running this script from the console should produce output like:
-> % python multi-turn-example.py
Model response requesting tool call:
ChatCompletion(id='chatcmpl-wwpstqqu94go4hvclqnpwn', choices=[Choice(finish_reason='tool_calls', index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role='assistant', function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='377278620', function=Function(arguments='{"order_id":"1017"}', name='get_delivery_date'), type='function')]))], created=1730916196, model='lmstudio-community/qwen2.5-7b-instruct', object='chat.completion', service_tier=None, system_fingerprint='lmstudio-community/qwen2.5-7b-instruct', usage=CompletionUsage(completion_tokens=24, prompt_tokens=223, total_tokens=247, completion_tokens_details=None, prompt_tokens_details=None))
get_delivery_date function returns delivery date:
2024-11-19 13:03:17.773298
Final model response with knowledge of the tool call result:
Your order number 1017 is scheduled for delivery on November 19, 2024, at 13:03 PM.
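The same pattern extends to several tools at once. The next script, agent-example.py (the filename used in the console session further below), registers three local functions and routes each tool call the model makes to the matching one: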
import json
from urllib.parse import urlparse
import webbrowser
from datetime import datetime
import os
from openai import OpenAI
# Point to the local server
client = OpenAI(base_url="https://127.0.0.1:1234/v1", api_key="lm-studio")
model = "lmstudio-community/qwen2.5-7b-instruct"defis_valid_url(url: str) -> bool:
try:
result = urlparse(url)
returnbool(result.netloc) # Returns True if there's a valid network locationexcept Exception:
returnFalsedefopen_safe_url(url: str) -> dict:
# List of allowed domains (expand as needed)
SAFE_DOMAINS = {
"lmstudio.ai",
"github.com",
"google.com",
"wikipedia.org",
"weather.com",
"stackoverflow.com",
"python.org",
"docs.python.org",
}
try:
        # Add http:// if no scheme is present
        if not url.startswith(('http://', 'https://')):
            url = 'http://' + url

        # Validate URL format
        if not is_valid_url(url):
return {"status": "error", "message": f"Invalid URL format: {url}"}
# Parse the URL and check domain
parsed_url = urlparse(url)
domain = parsed_url.netloc.lower()
base_domain = ".".join(domain.split(".")[-2:])
if base_domain in SAFE_DOMAINS:
webbrowser.open(url)
return {"status": "success", "message": f"Opened {url} in browser"}
else:
return {
"status": "error",
"message": f"Domain {domain} not in allowed list",
}
except Exception as e:
return {"status": "error", "message": str(e)}
def get_current_time() -> dict:
    """Get the current system time with timezone information"""
    try:
current_time = datetime.now()
timezone = datetime.now().astimezone().tzinfo
formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S %Z")
return {
"status": "success",
"time": formatted_time,
"timezone": str(timezone),
"timestamp": current_time.timestamp(),
}
except Exception as e:
return {"status": "error", "message": str(e)}
def analyze_directory(path: str = ".") -> dict:
    """Count and categorize files in a directory"""
    try:
stats = {
"total_files": 0,
"total_dirs": 0,
"file_types": {},
"total_size_bytes": 0,
}
for entry in os.scandir(path):
if entry.is_file():
stats["total_files"] += 1
                ext = os.path.splitext(entry.name)[1].lower() or "no_extension"
stats["file_types"][ext] = stats["file_types"].get(ext, 0) + 1
stats["total_size_bytes"] += entry.stat().st_size
elif entry.is_dir():
stats["total_dirs"] += 1# Add size of directory contentsfor root, _, files in os.walk(entry.path):
for file in files:
try:
stats["total_size_bytes"] += os.path.getsize(os.path.join(root, file))
except (OSError, FileNotFoundError):
continuereturn {"status": "success", "stats": stats, "path": os.path.abspath(path)}
except Exception as e:
return {"status": "error", "message": str(e)}
tools = [
{
"type": "function",
"function": {
"name": "open_safe_url",
"description": "Open a URL in the browser if it's deemed safe",
"parameters": {
"type": "object",
"properties": {
"url": {
"type": "string",
"description": "The URL to open",
},
},
"required": ["url"],
},
},
},
{
"type": "function",
"function": {
"name": "get_current_time",
"description": "Get the current system time with timezone information",
"parameters": {
"type": "object",
"properties": {},
"required": [],
},
},
},
{
"type": "function",
"function": {
"name": "analyze_directory",
"description": "Analyze the contents of a directory, counting files and folders",
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "The directory path to analyze. Defaults to current directory if not specified.",
},
},
"required": [],
},
},
},
]
def process_tool_calls(response, messages):
    """Process multiple tool calls and return the final response and updated messages"""
    # Get all tool calls from the response
tool_calls = response.choices[0].message.tool_calls
# Create the assistant message with tool calls
assistant_tool_call_message = {
"role": "assistant",
"tool_calls": [
{
"id": tool_call.id,
"type": tool_call.type,
"function": tool_call.function,
}
for tool_call in tool_calls
],
}
# Add the assistant's tool call message to the history
messages.append(assistant_tool_call_message)
# Process each tool call and collect results
tool_results = []
for tool_call in tool_calls:
# For functions with no arguments, use empty dict
arguments = (
json.loads(tool_call.function.arguments)
if tool_call.function.arguments.strip()
else {}
)
        # Determine which function to call based on the tool call name
        if tool_call.function.name == "open_safe_url":
result = open_safe_url(arguments["url"])
elif tool_call.function.name == "get_current_time":
result = get_current_time()
elif tool_call.function.name == "analyze_directory":
path = arguments.get("path", ".")
result = analyze_directory(path)
else:
            # llm tried to call a function that doesn't exist, skip
            continue

        # Add the result message
tool_result_message = {
"role": "tool",
"content": json.dumps(result),
"tool_call_id": tool_call.id,
}
tool_results.append(tool_result_message)
messages.append(tool_result_message)
# Get the final response
final_response = client.chat.completions.create(
model=model,
messages=messages,
)
return final_response
def chat():
messages = [
{
"role": "system",
"content": "You are a helpful assistant that can open safe web links, tell the current time, and analyze directory contents. Use these capabilities whenever they might be helpful.",
}
]
print(
"Assistant: Hello! I can help you open safe web links, tell you the current time, and analyze directory contents. What would you like me to do?"
)
print("(Type 'quit' to exit)")
    while True:
# Get user input
user_input = input("\nYou: ").strip()
        # Check for quit command
        if user_input.lower() == "quit":
print("Assistant: Goodbye!")
            break

        # Add user message to conversation
messages.append({"role": "user", "content": user_input})
try:
# Get initial response
response = client.chat.completions.create(
model=model,
messages=messages,
tools=tools,
)
            # Check if the response includes tool calls
            if response.choices[0].message.tool_calls:
# Process all tool calls and get final response
final_response = process_tool_calls(response, messages)
print("\nAssistant:", final_response.choices[0].message.content)
# Add assistant's final response to messages
messages.append(
{
"role": "assistant",
"content": final_response.choices[0].message.content,
}
)
else:
                # If no tool call, just print the response
                print("\nAssistant:", response.choices[0].message.content)
# Add assistant's response to messages
messages.append(
{
"role": "assistant",
"content": response.choices[0].message.content,
}
)
except Exception as e:
print(f"\nAn error occurred: {str(e)}")
exit(1)
if __name__ == "__main__":
chat()
Running this script from the console lets you chat with the agent:
-> % python agent-example.py
Assistant: Hello! I can help you open safe web links, tell you the current time, and analyze directory contents. What would you like me to do?
(Type 'quit' to exit)
You: What time is it?
Assistant: The current time is 14:11:40 (EST) as of November 6, 2024.
You: What time is it now?
Assistant: The current time is 14:13:59 (EST) as of November 6, 2024.
You: Open lmstudio.ai
Assistant: The link to lmstudio.ai has been opened in your default web browser.
You: What's in my current directory?
Assistant: Your current directory at `/Users/matt/project` contains a total of 14 files and 8 directories. Here's the breakdown:
- Files without an extension: 3
- `.mjs` files: 2
- `.ts` (TypeScript) files: 3
- Markdown (`md`) file: 1
- JSON files: 4
- TOML file: 1
The total size of these items is 1,566,990,604 bytes.
You: Thank you!
Assistant: You're welcome! If you have any other questions or need further assistance, feel free to ask.
You:
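The final example, tool-streaming-chatbot.py (per the console session further below), does the same with stream=True: partial tool_calls deltas are accumulated chunk by chunk in process_stream before being executed: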
from openai import OpenAI
import time
client = OpenAI(base_url="http://127.0.0.1:1234/v1", api_key="lm-studio")
MODEL = "lmstudio-community/qwen2.5-7b-instruct"
TIME_TOOL = {
"type": "function",
"function": {
"name": "get_current_time",
"description": "Get the current time, only if asked",
"parameters": {"type": "object", "properties": {}},
},
}
def get_current_time():
return {"time": time.strftime("%H:%M:%S")}
def process_stream(stream, add_assistant_label=True):
"""Handle streaming responses from the API"""
collected_text = ""
tool_calls = []
    first_chunk = True
    for chunk in stream:
delta = chunk.choices[0].delta
        # Handle regular text output
        if delta.content:
if first_chunk:
print()
if add_assistant_label:
print("Assistant:", end=" ", flush=True)
                first_chunk = False
            print(delta.content, end="", flush=True)
collected_text += delta.content
        # Handle tool calls
        elif delta.tool_calls:
for tc in delta.tool_calls:
                if len(tool_calls) <= tc.index:
tool_calls.append({
"id": "", "type": "function",
"function": {"name": "", "arguments": ""}
})
tool_calls[tc.index] = {
"id": (tool_calls[tc.index]["id"] + (tc.idor"")),
"type": "function",
"function": {
"name": (tool_calls[tc.index]["function"]["name"] + (tc.function.name or"")),
"arguments": (tool_calls[tc.index]["function"]["arguments"] + (tc.function.arguments or""))
}
}
return collected_text, tool_calls
def chat_loop():
messages = []
print("Assistant: Hi! I am an AI agent empowered with the ability to tell the current time (Type 'quit' to exit)")
    while True:
user_input = input("\nYou: ").strip()
if user_input.lower() == "quit":
break
messages.append({"role": "user", "content": user_input})
# Get initial response
response_text, tool_calls = process_stream(
client.chat.completions.create(
model=MODEL,
messages=messages,
tools=[TIME_TOOL],
stream=True,
temperature=0.2
)
)
        if not tool_calls:
print()
        text_in_first_response = len(response_text) > 0
        if text_in_first_response:
messages.append({"role": "assistant", "content": response_text})
        # Handle tool calls if any
        if tool_calls:
tool_name = tool_calls[0]["function"]["name"]
print()
            if not text_in_first_response:
print("Assistant:", end=" ", flush=True)
print(f"**Calling Tool: {tool_name}**")
messages.append({"role": "assistant", "tool_calls": tool_calls})
            # Execute tool calls
            for tool_call in tool_calls:
if tool_call["function"]["name"] == "get_current_time":
result = get_current_time()
messages.append({
"role": "tool",
"content": str(result),
"tool_call_id": tool_call["id"]
})
# Get final response after tool execution
final_response, _ = process_stream(
client.chat.completions.create(
model=MODEL,
messages=messages,
stream=True
),
add_assistant_label=False
)
if final_response:
print()
messages.append({"role": "assistant", "content": final_response})
if __name__ == "__main__":
chat_loop()
You can chat with the bot by running this script from the console:
-> % python tool-streaming-chatbot.py
Assistant: Hi! I am an AI agent empowered with the ability to tell the current time (Type 'quit' to exit)
You: Tell me a joke, then tell me the current time
Assistant: Sure! Here's a light joke for you: Why don't scientists trust atoms? Because they make up everything.
Now, let me get the current time for you.
**Calling Tool: get_current_time**
The current time is 18:49:31. Enjoy your day!
You: