inference_provider_sdk 1.0.2 copy "inference_provider_sdk: ^1.0.2" to clipboard
inference_provider_sdk: ^1.0.2 copied to clipboard

Flutter/Dart SDK for Inference Provider API - Build powerful AI agents with RAG, tool calling, and MCP integration

example/main.dart

import 'package:inference_provider_sdk/inference_provider_sdk.dart';

/// End-to-end tour of the Inference Provider SDK.
///
/// Demonstrates, in order: agent creation/listing, text inference with
/// session continuity, conversation history retrieval, vision inference,
/// provider/model discovery, agent update, and cleanup. Replace the
/// credentials and base URL with your own before running.
void main() async {
  // Initialize the client
  final client = InferenceProviderClient(
    apiKey: 'ip_your_api_key',
    apiSecret: 'ips_your_api_secret',
    baseUrl: 'https://your-api-domain.com',
  );

  try {
    // =========================================================================
    // AGENT MANAGEMENT
    // =========================================================================

    print('=== Creating Agent ===');
    final agent = await client.createAgent(
      CreateAgentRequest(
        name: 'My AI Assistant',
        systemPrompt: 'You are a helpful AI assistant that provides clear and concise answers.',
        maxTokens: 1000,
        temperature: 0.7,
      ),
    );
    print('Created agent: ${agent.id} - ${agent.name}');

    // List all agents
    print('\n=== Listing All Agents ===');
    final agents = await client.listAgents();
    print('Total agents: ${agents.length}');
    for (final a in agents) {
      print('  - ${a.name} (${a.id})');
    }

    // =========================================================================
    // INFERENCE
    // =========================================================================

    print('\n=== Running Inference ===');
    final response = await client.runInference(
      InferenceRequest(
        agentId: agent.id,
        prompt: 'Hello! Can you tell me about artificial intelligence?',
      ),
    );
    print('Response: ${response.text}');
    print('Session ID: ${response.sessionId}');
    if (response.usage != null) {
      print('Tokens used: ${response.usage!.totalTokens}');
    }

    // Continue conversation
    print('\n=== Continuing Conversation ===');
    final followUp = await client.runInference(
      InferenceRequest(
        agentId: agent.id,
        prompt: 'Can you give me a specific example?',
        sessionId: response.sessionId, // Maintain context
      ),
    );
    print('Follow-up response: ${followUp.text}');

    // Get conversation history
    print('\n=== Getting Conversation History ===');
    final history = await client.getConversationHistory(response.sessionId);
    print('Conversation has ${history.length} messages:');
    for (final message in history) {
      // Truncate long messages to a 50-character preview. Guard the length
      // first: substring(0, 50) throws a RangeError on shorter content.
      final content = message.content;
      final preview =
          content.length > 50 ? '${content.substring(0, 50)}...' : content;
      print('  ${message.role}: $preview');
    }

    // =========================================================================
    // VISION INFERENCE
    // =========================================================================

    print('\n=== Running Vision Inference ===');
    final visionResponse = await client.runVisionInference(
      agentId: agent.id,
      prompt: 'What do you see in this image?',
      imageUrl: 'https://example.com/image.jpg',
    );
    print('Vision response: ${visionResponse.text}');

    // =========================================================================
    // PROVIDER MANAGEMENT
    // =========================================================================

    print('\n=== Listing AI Providers ===');
    final providers = await client.listProviders();
    print('Total providers: ${providers.length}');
    for (final provider in providers) {
      print('  - ${provider.name} (${provider.providerType})');

      // List models for this provider
      final models = await client.listModels(provider.id);
      print('    Models: ${models.length}');
      for (final model in models) {
        print('      - ${model.name}: ${model.modelId}');
      }
    }

    // =========================================================================
    // UPDATE AGENT
    // =========================================================================

    print('\n=== Updating Agent ===');
    final updatedAgent = await client.updateAgent(
      agent.id,
      UpdateAgentRequest(
        name: 'Updated AI Assistant',
        temperature: 0.9,
      ),
    );
    print('Updated agent: ${updatedAgent.name}');

    // =========================================================================
    // CLEAN UP
    // =========================================================================

    print('\n=== Deleting Agent ===');
    await client.deleteAgent(agent.id);
    print('Agent deleted successfully');

  // Catch the most specific SDK exceptions first; InferenceProviderException
  // is the catch-all base type and must stay last.
  } on AuthenticationException catch (e) {
    print('Authentication error: ${e.message}');
  } on NotFoundException catch (e) {
    print('Not found: ${e.message}');
  } on RateLimitException catch (e) {
    print('Rate limited. Retry after ${e.retryAfter} seconds');
  } on ValidationException catch (e) {
    print('Validation error: ${e.message}');
  } on ServerException catch (e) {
    print('Server error: ${e.message}');
  } on NetworkException catch (e) {
    print('Network error: ${e.message}');
  } on InferenceProviderException catch (e) {
    print('Error: ${e.message}');
  } finally {
    // Always dispose the client when done — releases the underlying
    // HTTP connection resources even if an exception was thrown above.
    client.dispose();
  }
}
0
likes
80
points
15
downloads

Publisher

unverified uploader

Weekly Downloads

Flutter/Dart SDK for Inference Provider API - Build powerful AI agents with RAG, tool calling, and MCP integration

Repository (GitHub)
View/report issues

Topics

#ai #llm #agent #rag #inference

Documentation

Documentation
API reference

License

MIT (license)

Dependencies

http, json_annotation

More

Packages that depend on inference_provider_sdk