Skip to main content

Adaline Class

The Adaline class is the main entry point for the TypeScript SDK. It provides methods for fetching deployments and initializing monitors for observability.

Constructor

new Adaline(options?: AdalineOptions)

Parameters

options
object
Configuration options for the Adaline client

Example

import { Adaline } from '@adaline/client';

// Uses ADALINE_API_KEY environment variable
const adaline = new Adaline();

Methods

getDeployment()

Fetch a specific deployment by its ID.
async getDeployment(options: GetDeploymentOptions): Promise<Deployment>

Parameters

options
object
required

Returns

deployment
Deployment
The Deployment object. See Deployment Type for complete documentation.

Example

const deployment = await adaline.getDeployment({
  promptId: 'prompt_abc123',
  deploymentId: 'deploy_xyz789'
});

console.log(deployment.prompt.config.model); // 'gpt-4o'
console.log(deployment.prompt.messages);     // Array of messages
console.log(deployment.prompt.tools);        // Array of tools

getLatestDeployment()

Fetch the latest deployment for a prompt in a specific environment.
async getLatestDeployment(options: GetLatestDeploymentOptions): Promise<Deployment>

Parameters

options
object
required

Returns

deployment
Deployment
The latest Deployment object for the specified environment. See Deployment Type for complete documentation.

Example

const deployment = await adaline.getLatestDeployment({
  promptId: 'prompt_abc123',
  deploymentEnvironmentId: 'environment_abc123'
});

// Use Adaline Gateway to call the LLM with deployment config
import { Gateway } from '@adaline/gateway';
import { OpenAI } from '@adaline/openai';

const gateway = new Gateway();
const openaiProvider = new OpenAI();

const model = openaiProvider.chatModel({
  modelName: deployment.prompt.config.model,
  apiKey: process.env.OPENAI_API_KEY!
});

const response = await gateway.completeChat({
  model,
  config: deployment.prompt.config.settings,
  messages: deployment.prompt.messages,
  tools: deployment.prompt.tools
});

initLatestDeployment()

Initialize a cached latest deployment with automatic background refresh. This is the recommended approach for production applications.
async initLatestDeployment(options: InitLatestDeploymentOptions): Promise<DeploymentController>

Parameters

options
object
required

Returns

controller
DeploymentController
A controller object with get(), backgroundStatus(), and stop() methods. See the DeploymentController type definition below for details.

Example

const controller = await adaline.initLatestDeployment({
  promptId: 'prompt_abc123',
  deploymentEnvironmentId: 'environment_abc123',
  refreshInterval: 60 // refresh every 60 seconds
});

// Get cached deployment (instant, no API call)
const deployment = await controller.get();

// Use the deployment
console.log(deployment.prompt.config.model);

// Stop background refresh when done
controller.stop();

initMonitor()

Initialize a monitoring session for logging traces and spans.
initMonitor(options: InitMonitorOptions): Monitor

Parameters

options
object
required

Returns

monitor
Monitor
A Monitor instance for creating traces and spans. See Monitor Class for details.

Example

const monitor = adaline.initMonitor({
  projectId: 'proj_abc123'
});

const trace = monitor.logTrace({ name: 'User Request' });
// ... log spans ...
trace.end();

Complete Example

Here’s a complete example showing all methods working together:
import { Adaline } from '@adaline/client';
import { Gateway } from '@adaline/gateway';
import { OpenAI } from '@adaline/openai';

const adaline = new Adaline({
  logger: (msg, err) => console.log(`[Adaline] ${msg}`, err)
});

const gateway = new Gateway();
const openaiProvider = new OpenAI();

// Initialize deployment controller
const deploymentController = await adaline.initLatestDeployment({
  promptId: 'chatbot-prompt',
  deploymentEnvironmentId: 'environment_abc123',
  refreshInterval: 60
});

// Initialize monitor
const monitor = adaline.initMonitor({
  projectId: 'chatbot-project',
  flushInterval: 5,
  maxBufferSize: 10
});

// Handle chat request
/**
 * Handles one chat turn: reads the cached deployment, calls the LLM through
 * the Adaline Gateway, and records the call as a monitored trace + span.
 *
 * @param userId  - Used as the trace sessionId so turns group per user.
 * @param message - The user's input; appended after the deployment's messages.
 * @returns The text value of the first content part of the first response message.
 * @throws Re-throws any error from the gateway call after marking the span
 *         and trace as failed.
 */
async function handleChat(userId: string, message: string) {
  // Get cached deployment (no API call)
  const deployment = await deploymentController.get();

  // Create trace for this conversation
  const trace = monitor.logTrace({
    name: 'Chat Turn',
    sessionId: userId,
    tags: ['chat', 'production'],
    attributes: { userId, messageLength: message.length }
  });

  // Log LLM call
  const span = trace.logSpan({
    name: 'LLM Completion',
    promptId: deployment.promptId,
    deploymentId: deployment.id,
    runEvaluation: true,
    tags: ['llm', deployment.prompt.config.providerName]
  });

  try {
    // Create model from deployment config
    const model = openaiProvider.chatModel({
      modelName: deployment.prompt.config.model,
      apiKey: process.env.OPENAI_API_KEY!
    });

    // Call LLM using Adaline Gateway
    const gatewayResponse = await gateway.completeChat({
      model,
      config: deployment.prompt.config.settings,
      messages: [
        ...deployment.prompt.messages,
        {
          role: 'user',
          content: [{ modality: 'text', value: message }]
        }
      ],
      tools: deployment.prompt.tools
    });

    // NOTE(review): assumes the first message's first content part is text —
    // confirm against the gateway response shape before relying on this.
    const reply = gatewayResponse.response.messages[0].content[0].value;

    // Update span with success, attaching the raw provider request/response
    // so the monitored span captures exactly what was sent and received.
    span.update({
      status: 'success',
      content: {
        type: 'Model',
        provider: deployment.prompt.config.providerName,
        model: deployment.prompt.config.model,
        input: JSON.stringify(gatewayResponse.provider.request),
        output: JSON.stringify(gatewayResponse.provider.response)
      },
      attributes: {
        cached: gatewayResponse.cached,
      }
    });

    trace.update({ status: 'success' });

    return reply;

  } catch (error) {
    // Record the failure on both span and trace, then propagate to the caller.
    span.update({
      status: 'failure',
      attributes: {
        error: error instanceof Error ? error.message : String(error)
      }
    });
    trace.update({ status: 'failure' });
    throw error;

  } finally {
    // Always close the span and trace, success or failure.
    span.end();
    trace.end();
    // Auto-flushes based on interval/buffer size
  }
}

// Graceful shutdown
process.on('SIGTERM', async () => {
  await monitor.flush();
  monitor.stop();
  deploymentController.stop();
  console.log('Shutdown complete');
});

Type Definitions

/** Configuration accepted by the Adaline client constructor. */
interface AdalineOptions {
  /** API key; falls back to the ADALINE_API_KEY environment variable when omitted. */
  apiKey?: string;
  /** Override for the API base URL (e.g. 'https://api.adaline.ai/v2'). */
  baseURL?: string;
  /** Optional sink for client log messages; `err` is set when logging an error. */
  logger?: (msg: string, err?: unknown) => void;
}

/** Identifies one specific deployment of a prompt, for getDeployment(). */
interface GetDeploymentOptions {
  /** Prompt identifier, e.g. 'prompt_abc123'. */
  promptId: string;
  /** Deployment identifier, e.g. 'deploy_xyz789'. */
  deploymentId: string;
}

/** Selects the latest deployment of a prompt in one environment, for getLatestDeployment(). */
interface GetLatestDeploymentOptions {
  /** Prompt identifier, e.g. 'prompt_abc123'. */
  promptId: string;
  /** Environment identifier, e.g. 'environment_abc123'. */
  deploymentEnvironmentId: string;
}

/** Options for initLatestDeployment(): cached deployment with background refresh. */
interface InitLatestDeploymentOptions {
  /** Prompt identifier, e.g. 'prompt_abc123'. */
  promptId: string;
  /** Environment identifier, e.g. 'environment_abc123'. */
  deploymentEnvironmentId: string;
  /** Seconds between background refreshes (the examples above use 60). */
  refreshInterval?: number;
  /** NOTE(review): presumably stops background refresh after this many consecutive failures — confirm behavior and default. */
  maxContinuousFailures?: number;
}

/** Handle returned by initLatestDeployment() for reading the cached deployment. */
interface DeploymentController {
  /**
   * Returns the cached deployment without an API call; pass true to force a
   * refresh. May resolve to undefined when no deployment is available.
   */
  get: (forceRefresh?: boolean) => Promise<Deployment | undefined>;
  /** Snapshot of the background refresh loop's health (see BackgroundStatus). */
  backgroundStatus: () => BackgroundStatus;
  /** Stops the background refresh; call during shutdown. */
  stop: () => void;
}

/** Health snapshot returned by DeploymentController.backgroundStatus(). */
interface BackgroundStatus {
  /** True once background refresh is no longer running (e.g. after stop(); possibly after repeated failures — confirm). */
  stopped: boolean;
  /** Number of refresh attempts that have failed in a row. */
  consecutiveFailures: number;
  /** The most recent refresh error, or null if none has occurred. */
  lastError: Error | null;
  /** When the deployment was last refreshed. */
  lastRefreshed: Date;
}

/** Options for initMonitor(), which creates a Monitor for traces and spans. */
interface InitMonitorOptions {
  /** Project identifier, e.g. 'proj_abc123'. */
  projectId: string;
  /** Seconds between automatic flushes (the example above uses 5). */
  flushInterval?: number;
  /** Buffered-entry count that triggers an auto-flush (the example above uses 10). */
  maxBufferSize?: number;
  /** NOTE(review): presumably default content merged into logged spans — confirm semantics. */
  defaultContent?: LogSpanContent;
  /** NOTE(review): presumably stops auto-flushing after this many consecutive flush failures — confirm. */
  maxContinuousFlushFailures?: number;
}

Best Practices

Use Environment Variables

const adaline = new Adaline({
  apiKey: process.env.ADALINE_API_KEY,
  baseURL: process.env.ADALINE_API_URL || 'https://api.adaline.ai/v2'
});

Initialize Once, Use Everywhere

// Initialize at app startup
let deploymentController: DeploymentController;
let monitor: Monitor;

/**
 * Creates the Adaline client once at app startup and populates the shared
 * deploymentController and monitor used throughout the app.
 *
 * NOTE(review): the non-null assertions assume PROMPT_ID, ENVIRONMENT, and
 * PROJECT_ID are set — validate environment variables before calling this.
 */
async function initialize() {
  const adaline = new Adaline();
  
  deploymentController = await adaline.initLatestDeployment({
    promptId: process.env.PROMPT_ID!,
    deploymentEnvironmentId: process.env.ENVIRONMENT!
  });
  
  monitor = adaline.initMonitor({
    projectId: process.env.PROJECT_ID!
  });
}

// Export for use throughout your app
export { deploymentController, monitor };

Monitor Health in Production

const controller = await adaline.initLatestDeployment({ /* ... */ });

// Set up health check
setInterval(() => {
  const status = controller.backgroundStatus();
  
  if (status.stopped) {
    // Alert: Background refresh stopped!
    logger.error('Deployment refresh stopped', status.lastError);
  }
  
  if (status.consecutiveFailures >= 2) {
    // Warning: Having issues
    logger.warn('Deployment refresh failures', {
      failures: status.consecutiveFailures,
      lastError: status.lastError
    });
  }
}, 60000); // Check every minute

Handle Graceful Shutdown

/**
 * Drains and stops background work, then exits the process.
 *
 * Order matters: flush() is awaited before stop() so buffered monitor logs
 * are sent before the background flush loop is torn down.
 */
async function gracefulShutdown() {
  console.log('Shutting down...');
  
  // Flush remaining logs
  await monitor.flush();
  
  // Stop background processes
  monitor.stop();
  deploymentController.stop();
  
  console.log('Shutdown complete');
  process.exit(0);
}

process.on('SIGTERM', gracefulShutdown);
process.on('SIGINT', gracefulShutdown);