TypeScript SDK
The TypeScript SDK provides a complete toolkit for integrating Adaline’s LLM deployment and observability features into your AI applications.
Installation
npm install @adaline/client @adaline/api
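The examples below also import the @adaline/gateway and @adaline/openai packages to make provider calls, plus chromadb for the RAG example. If you want to run them as written, install those as well:
npm install @adaline/gateway @adaline/openai chromadb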
Quick Start
import { Adaline } from '@adaline/client';

// Initialize client (API key from ADALINE_API_KEY env var)
const adaline = new Adaline();

// Get latest deployment
const deployment = await adaline.getLatestDeployment({
  promptId: 'your-prompt-id',
  deploymentEnvironmentId: 'your-deployment-environment-id'
});

// Initialize monitoring
const monitor = adaline.initMonitor({
  projectId: 'your-project-id',
  flushInterval: 5,
  maxBufferSize: 10
});

// Create trace and spans
const trace = monitor.logTrace({
  name: 'User Request',
  sessionId: 'session-123'
});

const span = trace.logSpan({
  name: 'LLM Call',
  promptId: deployment.promptId,
  deploymentId: deployment.id
});

// Do the LLM call using `deployment`; `response` below stands for
// the result returned by your provider call
span.update({
  status: 'success',
  content: {
    type: 'Model',
    provider: deployment.prompt.config.providerName,
    model: deployment.prompt.config.model,
    input: JSON.stringify(deployment.prompt.messages),
    output: JSON.stringify(response)
  }
});
span.end();

trace.end();
Type Definitions
The SDK includes full TypeScript type definitions. Most types are imported from @adaline/api:
// API types (deployment, messages, content, logging)
import type {
  Deployment,
  DeploymentPrompt,
  Message,
  Content,
  LogSpanContent,
  CreateLogTraceRequest,
  CreateLogSpanRequest
} from '@adaline/api';

// SDK-specific types
import type {
  TraceStatus,
  SpanStatus,
  BufferedEntry
} from '@adaline/client';
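As a quick illustration, the Deployment type can annotate helpers that work with fetched deployments. A minimal sketch (the describeDeployment helper is ours, not part of the SDK):
import type { Deployment } from '@adaline/api';

// Hypothetical helper: summarize the provider/model pair behind a deployment.
function describeDeployment(deployment: Deployment): string {
  return `${deployment.prompt.config.providerName}/${deployment.prompt.config.model}`;
}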
Error Handling
The SDK uses automatic retry logic with exponential backoff:
- 5xx errors: Automatically retried up to 10 times within a 20s budget
- 4xx errors: Fail immediately (no retry)
- Network errors: Retried with exponential backoff
Entries that fail to flush are stored in monitor.failedFlushEntries and retried on the next flush attempt.
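The policy is roughly the pattern below (a minimal sketch for illustration, not the SDK's internal code; only the 10-attempt and 20s figures come from the behavior described above):
// Illustrative exponential backoff mirroring the policy above.
async function withBackoff<T>(
  fn: () => Promise<T>,
  maxAttempts = 10,
  budgetMs = 20_000
): Promise<T> {
  const start = Date.now();
  for (let attempt = 1; ; attempt++) {
    try {
      return await fn();
    } catch (error: any) {
      const status = error?.status;
      const retryable = status === undefined || status >= 500; // network error or 5xx
      const exhausted = attempt >= maxAttempts || Date.now() - start >= budgetMs;
      if (!retryable || exhausted) throw error; // 4xx fails immediately
      // exponential delay, capped at 2s
      await new Promise((r) => setTimeout(r, Math.min(100 * 2 ** attempt, 2_000)));
    }
  }
}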
Real-World Examples
Example 1: RAG Pipeline
import { Adaline } from '@adaline/client';
import { Gateway } from '@adaline/gateway';
import { OpenAI } from '@adaline/openai';
import { ChromaClient } from 'chromadb';

const adaline = new Adaline();
const gateway = new Gateway();
const openaiProvider = new OpenAI();
const chroma = new ChromaClient();

const monitor = adaline.initMonitor({ projectId: 'rag-system' });

async function answerQuestion(sessionId: string, question: string) {
  const trace = monitor.logTrace({
    name: 'RAG Query',
    sessionId,
    tags: ['rag', 'qa']
  });

  try {
    // Step 1: Generate embedding
    const embedSpan = trace.logSpan({
      name: 'Generate Query Embedding',
      tags: ['embedding']
    });

    const embeddingModel = openaiProvider.embeddingModel({
      modelName: 'text-embedding-3-large',
      apiKey: process.env.OPENAI_API_KEY!
    });

    const embeddingResponse = await gateway.getEmbeddings({
      model: embeddingModel,
      config: {},
      embeddingRequests: {
        modality: 'text',
        requests: [question]
      }
    });

    const embedding = embeddingResponse.response.embeddings[0].values;

    embedSpan.update({
      status: 'success',
      content: {
        type: 'Embeddings',
        input: JSON.stringify({ query: question }),
        output: JSON.stringify({ dimensions: embedding.length })
      }
    });
    embedSpan.end();

    // Step 2: Retrieve documents
    const retrievalSpan = trace.logSpan({
      name: 'Vector Search',
      tags: ['retrieval']
    });

    const collection = await chroma.getCollection({ name: 'docs' });
    const results = await collection.query({
      queryEmbeddings: [embedding],
      nResults: 5
    });

    retrievalSpan.update({
      status: 'success',
      content: {
        type: 'Retrieval',
        input: JSON.stringify({ query: question, topK: 5 }),
        output: JSON.stringify({
          documentIds: results.ids[0],
          scores: results.distances?.[0]
        })
      }
    });
    retrievalSpan.end();

    // Step 3: Get LLM deployment
    const deployment = await adaline.getLatestDeployment({
      promptId: 'rag-answer-prompt',
      deploymentEnvironmentId: 'environment_abc123'
    });

    // Step 4: Generate answer
    const llmSpan = trace.logSpan({
      name: 'Generate Answer',
      promptId: deployment.promptId,
      deploymentId: deployment.id,
      runEvaluation: true,
      tags: ['llm', 'answer']
    });

    const context = results.documents[0].join('\n\n');

    const chatModel = openaiProvider.chatModel({
      modelName: deployment.prompt.config.model,
      apiKey: process.env.OPENAI_API_KEY!
    });

    const gatewayResponse = await gateway.completeChat({
      model: chatModel,
      config: deployment.prompt.config.settings,
      messages: [
        ...deployment.prompt.messages,
        {
          role: 'user',
          content: [{ modality: 'text', value: `Context:\n${context}\n\nQuestion: ${question}` }]
        }
      ],
      tools: deployment.prompt.tools
    });

    const answer = gatewayResponse.response.messages[0].content[0].value;

    llmSpan.update({
      status: 'success',
      content: {
        type: 'Model',
        provider: deployment.prompt.config.providerName,
        model: deployment.prompt.config.model,
        input: JSON.stringify(gatewayResponse.provider.request),
        output: JSON.stringify(gatewayResponse.provider.response)
      },
      attributes: {
        contextDocs: results.ids[0].length,
        cached: gatewayResponse.cached,
        latency: gatewayResponse.latencyInMs
      }
    });
    llmSpan.end();

    trace.update({ status: 'success' });
    return answer;
  } catch (error) {
    trace.update({ status: 'failure', attributes: { error: String(error) } });
    throw error;
  } finally {
    trace.end();
  }
}
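Calling the pipeline looks like this (the session id and question are placeholders):
const answer = await answerQuestion('session-123', 'How do I rotate an API key?');
console.log(answer);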
Example 2: Multi-Agent System
import { Adaline } from '@adaline/client';
import { Gateway } from '@adaline/gateway';
import { OpenAI } from '@adaline/openai';

const adaline = new Adaline();
const gateway = new Gateway();
const openaiProvider = new OpenAI();

const monitor = adaline.initMonitor({ projectId: 'multi-agent' });

async function processRequest(userId: string, task: string) {
  const trace = monitor.logTrace({
    name: 'Multi-Agent Task',
    sessionId: userId,
    tags: ['agents', 'orchestration']
  });

  try {
    // Router agent decides which specialist to use
    const routerSpan = trace.logSpan({
      name: 'Router Agent',
      tags: ['agent', 'router']
    });

    const routerDeployment = await adaline.getLatestDeployment({
      promptId: 'router-agent-prompt',
      deploymentEnvironmentId: 'environment_abc123'
    });

    const routerModel = openaiProvider.chatModel({
      modelName: routerDeployment.prompt.config.model,
      apiKey: process.env.OPENAI_API_KEY!
    });

    const routerResponse = await gateway.completeChat({
      model: routerModel,
      config: routerDeployment.prompt.config.settings,
      messages: [
        ...routerDeployment.prompt.messages,
        {
          role: 'user',
          content: [{ modality: 'text', value: task }]
        }
      ],
      tools: routerDeployment.prompt.tools
    });

    const selectedAgent = JSON.parse(
      routerResponse.response.messages[0].content[0].value || '{}'
    ).agent;

    routerSpan.update({
      status: 'success',
      content: {
        type: 'Model',
        provider: routerDeployment.prompt.config.providerName,
        model: routerDeployment.prompt.config.model,
        input: JSON.stringify(routerResponse.provider.request),
        output: JSON.stringify(routerResponse.provider.response)
      },
      attributes: { selectedAgent }
    });
    routerSpan.end();

    // Execute specialist agent
    const specialistSpan = trace.logSpan({
      name: `${selectedAgent} Agent`,
      tags: ['agent', 'specialist', selectedAgent]
    });

    const specialistDeployment = await adaline.getLatestDeployment({
      promptId: `${selectedAgent}-agent-prompt`,
      deploymentEnvironmentId: 'environment_abc123'
    });

    const specialistModel = openaiProvider.chatModel({
      modelName: specialistDeployment.prompt.config.model,
      apiKey: process.env.OPENAI_API_KEY!
    });

    const specialistResponse = await gateway.completeChat({
      model: specialistModel,
      config: specialistDeployment.prompt.config.settings,
      messages: [
        ...specialistDeployment.prompt.messages,
        {
          role: 'user',
          content: [{ modality: 'text', value: task }]
        }
      ],
      tools: specialistDeployment.prompt.tools
    });

    specialistSpan.update({
      status: 'success',
      content: {
        type: 'Model',
        provider: specialistDeployment.prompt.config.providerName,
        model: specialistDeployment.prompt.config.model,
        input: JSON.stringify(specialistResponse.provider.request),
        output: JSON.stringify(specialistResponse.provider.response)
      }
    });
    specialistSpan.end();

    trace.update({ status: 'success', attributes: { agent: selectedAgent } });
    return specialistResponse.response.messages[0].content[0].value;
  } catch (error) {
    trace.update({ status: 'failure', attributes: { error: String(error) } });
    throw error;
  } finally {
    trace.end();
  }
}
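And invoking the orchestrator (placeholder user id and task):
const result = await processRequest('user-42', 'Draft a summary of open support tickets');
console.log(result);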