The example below demonstrates how to manually trace a tool function along with a chat completion response. You'll see how to create spans for both the tool and LLM to capture their input, output, and key events.
Before diving into the code, ensure you have correctly set up your tracer (imported below as `tracer`) and instantiated the OpenAI client used for the chat completion (`openai_client`).
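As a minimal sketch of that setup (assuming an OpenTelemetry-based tracing configuration and the OpenAI Python SDK; the exact tracer registration depends on your tracing backend):

import openai
from opentelemetry import trace

# Acquire a tracer from your configured TracerProvider
# (e.g. after registering your tracing backend / exporter).
tracer = trace.get_tracer(__name__)

# Instantiate the OpenAI client used by the LLM span below.
openai_client = openai.OpenAI()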
import json

# from your_tracer import tracer

def run_tool(tool_function, tool_args):
    # First, set the context for the current span
    with tracer.start_as_current_span(
        name="Tool - some tool",
        attributes={
            # Set these attributes before calling the tool, in case the tool raises an exception
            "openinference.span.kind": "TOOL",
            "input.value": json.dumps(tool_args),
            "message.tool_calls.0.tool_call.function.name": tool_function.__name__,
            "message.tool_calls.0.tool_call.function.arguments": json.dumps(tool_args),
        },
    ) as tool_span:
        # Run the tool; its output is the formatted prompt for the chat completion
        resulting_prompt = tool_function(input=tool_args)
        # Optional: record the resulting prompt as the tool span's output
        tool_span.set_attribute(
            "message.tool_calls.0.tool_call.function.output", resulting_prompt
        )

        # This LLM span nests under the tool span in the trace
        with tracer.start_as_current_span(
            name="Tool - llm response",
            # Set these attributes before calling the LLM
            attributes={
                "openinference.span.kind": "LLM",
                "input.value": resulting_prompt,
            },
        ) as llm_span:
            llm_response = openai_client.chat.completions.create(
                model=model_version,
                messages=[{"role": "user", "content": resulting_prompt}],
                temperature=TEMPERATURE,
            )
            # Record the LLM's text output on the span (attribute values must be primitives)
            llm_span.set_attribute(
                "output.value", llm_response.choices[0].message.content
            )