Set up GroqInstrumentor to trace calls to the Groq LLM in the application and send the traces to an Arize model endpoint, as defined below.
# Auto-instrumentor for the Groq client, provided by OpenInference.
from openinference.instrumentation.groq import GroqInstrumentor

# OpenTelemetry convenience helpers for exporting traces to Arize.
from arize_otel import register_otel, Endpoints

# Register an OTEL tracer provider that ships spans to the Arize endpoint.
register_otel(
    endpoints=Endpoints.ARIZE,
    space_id="your-space-id",            # found on the app space settings page
    api_key="your-api-key",              # found on the app space settings page
    model_id="your-model-id",            # any name you would like
    model_version="your-model-version",  # the model version, e.g. "v1"
)

# Attach the instrumentor so every Groq call emits a trace.
GroqInstrumentor().instrument()
Run a simple chat completion via Groq and see it instrumented.
import os

from groq import Groq

# Get your Groq API key by visiting https://groq.com/
os.environ["GROQ_API_KEY"] = "your-groq-api-key"

# The client reads GROQ_API_KEY from the environment set above.
client = Groq()

# Send a single-turn chat request to the Groq client.
chat_completion = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "Explain the importance of low latency LLMs",
        }
    ],
    model="mixtral-8x7b-32768",
)

# Print the assistant's reply from the first (only) choice.
print(chat_completion.choices[0].message.content)