Adaline Class
The Adaline class is the main entry point for the Python SDK. It provides async methods for fetching deployments and initializing monitors for observability.
Constructor
Adaline( * , api_key = None , host = None , debug = False )
Parameters
api_key
str | None
default: None
API key for authentication. If omitted, reads from ADALINE_API_KEY environment variable.
host
str | None
default: "https://api.adaline.ai/v2"
Base URL of the Adaline API. Falls back to ADALINE_BASE_URL environment variable, then https://api.adaline.ai/v2.
debug
bool
default: False
If True, enables DEBUG-level logging on the adaline logger with a StreamHandler formatter.
Example
from adaline.main import Adaline
# Uses ADALINE_API_KEY environment variable
adaline = Adaline()
Methods
get_deployment()
Fetch a specific deployment by its ID. This is an async method.
async get_deployment( * , prompt_id: str , deployment_id: str ) -> Deployment
Parameters
prompt_id
str
The unique identifier of the prompt.
deployment_id
str
The unique identifier of the deployment.
Returns
The Deployment object for the specified prompt and deployment IDs.
Example
deployment = await adaline.get_deployment(
prompt_id = "prompt_abc123" ,
deployment_id = "deploy_xyz789"
)
print (deployment.prompt.config.model) # 'gpt-4o'
print (deployment.prompt.messages) # List of messages
print (deployment.prompt.tools) # List of tools
get_latest_deployment()
Fetch the latest deployment for a prompt in a specific environment. This is an async method.
async get_latest_deployment( * , prompt_id: str , deployment_environment_id: str ) -> Deployment
Parameters
prompt_id
The unique identifier of the prompt.
deployment_environment_id
The unique identifier of the deployment environment.
Returns
The latest Deployment object for the specified environment.
Example
deployment = await adaline.get_latest_deployment(
prompt_id = "prompt_abc123" ,
deployment_environment_id = "environment_abc123"
)
print (deployment.prompt.config.model)
print (deployment.prompt.config.provider_name)
print (deployment.prompt.messages)
init_latest_deployment()
Initialize a cached latest deployment with automatic background refresh. This is an async method and the recommended approach for production applications.
async init_latest_deployment(
* ,
prompt_id: str ,
deployment_environment_id: str ,
refresh_interval: int = 60 ,
max_continuous_failures: int = 3
) -> Controller
Parameters
prompt_id
The unique identifier of the prompt.
deployment_environment_id
The unique identifier of the deployment environment.
refresh_interval
default: 60
How often to refresh the cached deployment, in seconds. Clamped to range 1-600.
max_continuous_failures
default: 3
Maximum consecutive failures before stopping background refresh.
Returns
A Controller object with the following methods:
get()
async get(force_refresh: bool = False) -> Optional[Deployment]
Get the cached deployment. Pass force_refresh=True to force a fresh fetch.
Get a snapshot of the background refresh status.
stop()
Stop the background refresh task and clear the cache.
Example
Basic Usage
Health Monitoring
controller = await adaline.init_latest_deployment(
prompt_id = "prompt_abc123" ,
deployment_environment_id = "environment_abc123" ,
refresh_interval = 60
)
# Get cached deployment (instant, no API call)
deployment = await controller.get()
# Force fresh fetch (ignores cache, makes API call)
fresh = await controller.get( force_refresh = True )
# Stop background refresh when done
await controller.stop()
init_monitor()
Initialize a monitoring session for logging traces and spans. This is a synchronous method.
init_monitor(
* ,
project_id: str ,
flush_interval_seconds: int = 1 ,
max_buffer_size: int = 1000 ,
default_content: Optional[LogSpanContent] = None
) -> Monitor
Parameters
project_id
str
Unique identifier for your project. All traces and spans will be associated with this project.
flush_interval_seconds
int
default: 1
How often to flush buffered entries to the API, in seconds.
max_buffer_size
int
default: 1000
Maximum number of buffered entries. When exceeded, oldest entries are dropped.
default_content
Optional[LogSpanContent]
default: None
Default content for spans when no explicit content is provided. Defaults to LogSpanContent(actual_instance=LogSpanOtherContent(type="Other", input="{}", output="{}")).
Returns
A Monitor instance for creating traces and spans. See Monitor Class for details.
Example
Basic
Custom Configuration
monitor = adaline.init_monitor( project_id = "proj_abc123" )
trace = monitor.log_trace( name = "User Request" )
trace.end()
Complete Example
import asyncio
import json
from openai import AsyncOpenAI
from adaline.main import Adaline
from adaline_api.models.log_span_content import LogSpanContent
from adaline_api.models.log_span_model_content import LogSpanModelContent


async def main():
    """End-to-end example: cached deployment fetch plus a monitored OpenAI chat call."""
    adaline = Adaline(debug=True)
    openai = AsyncOpenAI()

    # Initialize deployment controller
    controller = await adaline.init_latest_deployment(
        prompt_id="chatbot-prompt",
        deployment_environment_id="environment_abc123",
        refresh_interval=60,
    )

    # Initialize monitor
    monitor = adaline.init_monitor(
        project_id="chatbot-project",
        flush_interval_seconds=5,
        max_buffer_size=100,
    )

    async def handle_chat(user_id: str, message: str):
        # Cached read — no API round-trip on the hot path.
        deployment = await controller.get()

        trace = monitor.log_trace(
            name="Chat Turn",
            session_id=user_id,
            tags=["chat", "production"],
            attributes={"user_id": user_id, "message_length": len(message)},
        )
        span = trace.log_span(
            name="LLM Completion",
            prompt_id=deployment.prompt_id,
            deployment_id=deployment.id,
            run_evaluation=True,
            tags=["llm", deployment.prompt.config.provider_name],
        )
        try:
            response = await openai.chat.completions.create(
                model=deployment.prompt.config.model,
                messages=[
                    *deployment.prompt.messages,
                    {"role": "user", "content": message},
                ],
                **deployment.prompt.config.settings,
            )
            reply = response.choices[0].message.content
            span.update({
                "status": "success",
                "content": LogSpanContent(
                    actual_instance=LogSpanModelContent(
                        type="Model",
                        provider=deployment.prompt.config.provider_name,
                        model=deployment.prompt.config.model,
                        input=json.dumps(str(deployment.prompt.messages)),
                        output=json.dumps(response.choices[0].message.model_dump()),
                    )
                ),
            })
            trace.update({"status": "success"})
            return reply
        except Exception:
            span.update({"status": "failure"})
            trace.update({"status": "failure"})
            raise
        finally:
            # Always close the span and trace, on success or failure.
            span.end()
            trace.end()

    result = await handle_chat("user-123", "Hello, how are you?")
    print(result)

    # Flush buffered telemetry and shut everything down cleanly.
    await monitor.flush()
    monitor.stop()
    await controller.stop()


asyncio.run(main())
Environment Variables
| Variable | Description | Default |
| --- | --- | --- |
| ADALINE_API_KEY | API key for authentication | (none) |
| ADALINE_BASE_URL | API base URL | https://api.adaline.ai/v2 |
Best Practices
Use Environment Variables
import os

# API key and base URL are read from environment by default
adaline = Adaline()

# Or explicitly pass them
adaline = Adaline(
    api_key=os.environ["ADALINE_API_KEY"],
    host=os.environ.get("ADALINE_BASE_URL"),
)
Initialize Once, Use Everywhere
import os

from adaline.main import Adaline

adaline = Adaline()
controller = None
monitor = None


async def initialize():
    """Create the shared deployment controller and monitor once at startup.

    Stores both in module-level globals so request handlers can reuse them.
    """
    global controller, monitor
    # `import os` was missing from this standalone snippet even though
    # os.environ is used below; added so the example runs as written.
    controller = await adaline.init_latest_deployment(
        prompt_id=os.environ["PROMPT_ID"],
        deployment_environment_id=os.environ["ENVIRONMENT_ID"],
    )
    monitor = adaline.init_monitor(
        project_id=os.environ["PROJECT_ID"],
    )
Graceful Shutdown
async def shutdown():
    """Flush pending telemetry and stop background tasks before process exit."""
    if monitor:
        await monitor.flush()
        monitor.stop()
    if controller:
        await controller.stop()