Agents 0.0.2
Edge AI Agents SDK
LLMInterface Class Reference (abstract)

Interface for language model providers (OpenAI, Anthropic, Google, Ollama).

#include <llm_interface.h>

Inheritance diagram for LLMInterface:
    agents::llms::AnthropicLLM
    agents::llms::GoogleLLM
    agents::llms::OllamaLLM
    agents::llms::OpenAILLM

Public Member Functions

virtual ~LLMInterface ()=default
 Destructor.
 
virtual std::vector< String > getAvailableModels ()=0
 Get available models from this provider.
 
virtual void setModel (const String &model)=0
 Set the model to use.
 
virtual String getModel () const=0
 Get current model.
 
virtual void setApiKey (const String &api_key)=0
 Set API key.
 
virtual void setApiBase (const String &api_base)=0
 Set API base URL (for self-hosted or proxied endpoints).
 
virtual void setOptions (const LLMOptions &options)=0
 Set options for API calls.
 
virtual LLMOptions getOptions () const=0
 Get current options.
 
virtual LLMResponse complete (const String &prompt)
 Generate completion from a prompt.
 
virtual LLMResponse complete (const std::vector< Message > &messages)
 Generate completion from a list of messages.
 
virtual LLMResponse completeWithTools (const std::vector< Message > &messages, const std::vector< JsonObject > &tools_schema)
 Generate completion with available tools.
 
virtual LLMResponse chat (const std::vector< Message > &messages)=0
 Generate completion from a list of messages.
 
virtual LLMResponse chatWithTools (const std::vector< Message > &messages, const std::vector< std::shared_ptr< Tool > > &tools)=0
 Generate completion with available tools.
 
virtual void streamChat (const std::vector< Message > &messages, std::function< void(const String &, bool)> callback)=0
 Stream results with callback.
 
virtual Task< LLMResponse > completeAsync (const String &prompt)
 Async complete from a prompt.
 
virtual Task< LLMResponse > completeAsync (const std::vector< Message > &messages)
 Async complete from a list of messages.
 
virtual Task< LLMResponse > chatAsync (const std::vector< Message > &messages)
 Async chat from a list of messages.
 
virtual Task< LLMResponse > chatWithToolsAsync (const std::vector< Message > &messages, const std::vector< std::shared_ptr< Tool > > &tools)
 Async chat with tools.
 
virtual AsyncGenerator< String > streamChatAsync (const std::vector< Message > &messages)
 Stream chat with AsyncGenerator.
 

Detailed Description

Interface for language model providers (OpenAI, Anthropic, Google, Ollama).
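
A minimal end-to-end sketch. It assumes String aliases std::string, that agents::llms::OpenAILLM is default-constructible, and that LLMResponse exposes a content string; none of these details are documented on this page.

    #include <llm_interface.h>
    #include <iostream>
    #include <memory>

    using namespace agents;

    int main() {
        // Any concrete provider can sit behind the interface;
        // default construction of OpenAILLM is an assumption.
        std::unique_ptr<LLMInterface> llm = std::make_unique<llms::OpenAILLM>();
        llm->setApiKey("sk-...");        // provider credential (placeholder)
        llm->setModel("gpt-4o-mini");    // model names are provider-specific
        LLMResponse response = llm->complete("Say hello in one sentence.");
        std::cout << response.content << "\n"; // 'content' member is an assumption
        return 0;
    }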

Member Function Documentation

◆ chat()

virtual LLMResponse agents::LLMInterface::chat ( const std::vector< Message > & messages)
pure virtual

Generate completion from a list of messages.

Parameters
messages: The messages to generate completion from
Returns
The LLM response
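
A hedged sketch of a multi-turn call, given a configured llm as in the example under Detailed Description; the Message{role, content} layout is an assumption not documented on this page:

    std::vector<Message> messages = {
        Message{"system", "You are a concise assistant."}, // role/content order assumed
        Message{"user", "Summarize RAII in one line."}
    };
    LLMResponse reply = llm->chat(messages);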

◆ chatAsync()

virtual Task< LLMResponse > agents::LLMInterface::chatAsync ( const std::vector< Message > & messages)
virtual

Async chat from a list of messages.

Parameters
messages: The messages to generate completion from
Returns
The LLM response
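
The Task<LLMResponse> return type suggests a C++20 coroutine awaitable. A sketch under that assumption:

    Task<LLMResponse> askAsync(LLMInterface& llm, std::vector<Message> messages) {
        // co_await suspends this coroutine until the provider responds;
        // assumes Task<T> models the awaitable protocol.
        LLMResponse reply = co_await llm.chatAsync(messages);
        co_return reply;
    }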

◆ chatWithTools()

virtual LLMResponse agents::LLMInterface::chatWithTools ( const std::vector< Message > & messages,
const std::vector< std::shared_ptr< Tool > > & tools )
pure virtual

Generate completion with available tools.

Parameters
messages: The messages to generate completion from
tools: The tools to use
Returns
The LLM response
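
A sketch, assuming a concrete Tool implementation is available; makeWeatherTool below is hypothetical and stands in for however the SDK constructs tools:

    std::vector<std::shared_ptr<Tool>> tools;
    tools.push_back(makeWeatherTool()); // hypothetical factory -> std::shared_ptr<Tool>
    LLMResponse reply = llm->chatWithTools(messages, tools);
    // The response may carry tool-call requests rather than final text; how
    // tool calls surface on LLMResponse is not documented on this page.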

◆ chatWithToolsAsync()

virtual Task< LLMResponse > agents::LLMInterface::chatWithToolsAsync ( const std::vector< Message > & messages,
const std::vector< std::shared_ptr< Tool > > & tools )
virtual

Async chat with tools.

Parameters
messages: The messages to generate completion from
tools: The tools to use
Returns
The LLM response

◆ complete() [1/2]

virtual LLMResponse agents::LLMInterface::complete ( const std::vector< Message > & messages)
virtual

Generate completion from a list of messages.

Parameters
messages: The messages to generate completion from
Returns
The LLM response

◆ complete() [2/2]

virtual LLMResponse agents::LLMInterface::complete ( const String & prompt)
virtual

Generate completion from a prompt.

Parameters
prompt: The prompt to generate completion from
Returns
The LLM response

◆ completeAsync() [1/2]

virtual Task< LLMResponse > agents::LLMInterface::completeAsync ( const std::vector< Message > & messages)
virtual

Async complete from a list of messages.

Parameters
messages: The messages to generate completion from
Returns
The LLM response

◆ completeAsync() [2/2]

virtual Task< LLMResponse > agents::LLMInterface::completeAsync ( const String & prompt)
virtual

Async complete from a prompt.

Parameters
prompt: The prompt to generate completion from
Returns
The LLM response

◆ completeWithTools()

virtual LLMResponse agents::LLMInterface::completeWithTools ( const std::vector< Message > & messages,
const std::vector< JsonObject > & tools_schema )
virtual

Generate completion with available tools.

Parameters
messages: The messages to generate completion from
tools_schema: The tools schema to use
Returns
The LLM response
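
Unlike chatWithTools, this overload takes raw JsonObject schemas. JsonObject's construction API is not documented on this page, so the sketch leaves the definition abstract:

    JsonObject weather_schema; // populate with a provider-style JSON tool definition
    std::vector<JsonObject> schemas{weather_schema};
    LLMResponse reply = llm->completeWithTools(messages, schemas);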

◆ getAvailableModels()

virtual std::vector< String > agents::LLMInterface::getAvailableModels ( )
pure virtual

Get available models from this provider.

Returns
The available models
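
A small discovery sketch pairing this with setModel, given a configured llm:

    for (const String& model : llm->getAvailableModels())
        std::cout << model << "\n";   // enumerate what the provider offers
    llm->setModel("llama3");          // pick any name returned above (illustrative)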

◆ getModel()

virtual String agents::LLMInterface::getModel ( ) const
pure virtual

Get current model.

Returns
The current model

◆ getOptions()

virtual LLMOptions agents::LLMInterface::getOptions ( ) const
pure virtual

Get current options.

Returns
The current options

◆ setApiBase()

virtual void agents::LLMInterface::setApiBase ( const String & api_base)
pure virtual

Set API base URL (for self-hosted or proxied endpoints).

Parameters
api_base: The API base URL to use
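
For example, pointing a provider at a locally hosted or proxied endpoint (URLs illustrative; 11434 is Ollama's default port):

    llm->setApiBase("http://localhost:11434");       // local Ollama-style server
    llm->setApiBase("https://proxy.example.com/v1"); // or an API-compatible proxy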

◆ setApiKey()

virtual void agents::LLMInterface::setApiKey ( const String & api_key)
pure virtual

Set API key.

Parameters
api_key: The API key to use

◆ setModel()

virtual void agents::LLMInterface::setModel ( const String & model)
pure virtual

Set the model to use.

Parameters
model: The model to use

◆ setOptions()

virtual void agents::LLMInterface::setOptions ( const LLMOptions & options)
pure virtual

Set options for API calls.

Parameters
options: The options to use
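
A read-modify-write sketch; LLMOptions' members are not documented on this page, so no field names are shown:

    LLMOptions options = llm->getOptions();
    // ... adjust fields (e.g., sampling settings) here; member names are
    // not documented on this page ...
    llm->setOptions(options);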

◆ streamChat()

virtual void agents::LLMInterface::streamChat ( const std::vector< Message > & messages,
std::function< void(const String &, bool)> callback )
pure virtual

Stream results with callback.

Parameters
messages: The messages to generate completion from
callback: The callback to use
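
A streaming sketch; the bool parameter presumably flags the final chunk (an assumption):

    llm->streamChat(messages, [](const String& chunk, bool done) {
        std::cout << chunk << std::flush; // emit each chunk as it arrives
        if (done) std::cout << "\n";      // 'done' assumed to mark completion
    });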

◆ streamChatAsync()

virtual AsyncGenerator< String > agents::LLMInterface::streamChatAsync ( const std::vector< Message > & messages)
virtual

Stream chat with AsyncGenerator.

Parameters
messages: The messages to generate completion from
Returns
The AsyncGenerator
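
A consumption sketch from inside a coroutine. The iteration protocol shown is the common cppcoro-style async_generator shape; this SDK's AsyncGenerator may differ:

    auto stream = llm.streamChatAsync(messages);
    for (auto it = co_await stream.begin(); it != stream.end(); co_await ++it) {
        std::cout << *it << std::flush; // each yielded String is one chunk
    }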