Interface for language model providers (OpenAI, Anthropic, Google, Ollama).

#include <llm_interface.h>
◆ chat()
virtual LLMResponse agents::LLMInterface::chat (const std::vector< Message > & messages)
Generate a completion from a list of messages.
Parameters
    messages | The messages to generate the completion from
Returns
    The LLM response
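A minimal usage sketch, assuming a concrete provider implementation is available, that Message is brace-constructible from a role string and a content string, and that LLMResponse exposes a content field; none of these shapes are confirmed by this page:

#include <llm_interface.h>
#include <iostream>
#include <vector>

void askOnce(agents::LLMInterface& llm) {
    // Build a short conversation; the {role, content} shape is assumed.
    std::vector<agents::Message> messages = {
        {"system", "You are a concise assistant."},
        {"user",   "Name three C++ smart pointer types."}
    };
    agents::LLMResponse response = llm.chat(messages);
    std::cout << response.content << "\n";  // `content` is an assumed field name
}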
◆ chatAsync()
virtual Task< LLMResponse > agents::LLMInterface::chatAsync (const std::vector< Message > & messages)  [virtual]
Asynchronously generate a chat completion from a list of messages.
Parameters
    messages | The messages to generate the completion from
Returns
    A Task that resolves to the LLM response
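If Task< T > is a co_await-able coroutine type (an assumption; this page does not specify its interface), usage could look like the following sketch:

#include <llm_interface.h>
#include <iostream>
#include <vector>

agents::Task<void> askAsync(agents::LLMInterface& llm) {
    std::vector<agents::Message> messages = {
        {"user", "Explain RAII in one sentence."}
    };
    // Suspends until the provider responds, without blocking the thread.
    agents::LLMResponse response = co_await llm.chatAsync(messages);
    std::cout << response.content << "\n";  // `content` is an assumed field name
}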
◆ chatWithTools()
virtual LLMResponse agents::LLMInterface::chatWithTools (const std::vector< Message > & messages, const std::vector< std::shared_ptr< Tool > > & tools)  [pure virtual]
Generate a completion with a set of tools the model may call.
Parameters
    messages | The messages to generate the completion from
    tools | The tools to make available to the model
Returns
    The LLM response
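A sketch of a tool-augmented call; how tool-call requests are surfaced on LLMResponse is not documented here, so the example stops at issuing the request:

#include <llm_interface.h>
#include <memory>
#include <vector>

void askWithTools(agents::LLMInterface& llm,
                  std::shared_ptr<agents::Tool> weather_tool) {
    std::vector<agents::Message> messages = {
        {"user", "What's the weather in Paris right now?"}
    };
    std::vector<std::shared_ptr<agents::Tool>> tools = {weather_tool};
    // The model may answer directly or request a tool invocation;
    // inspecting that on the response is left undocumented here.
    agents::LLMResponse response = llm.chatWithTools(messages, tools);
}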
◆ chatWithToolsAsync()
virtual Task< LLMResponse > agents::LLMInterface::chatWithToolsAsync (const std::vector< Message > & messages, const std::vector< std::shared_ptr< Tool > > & tools)  [virtual]
Asynchronously generate a chat completion with a set of tools the model may call.
Parameters
    messages | The messages to generate the completion from
    tools | The tools to make available to the model
Returns
    A Task that resolves to the LLM response
◆ complete() [1/2]
virtual LLMResponse agents::LLMInterface::complete (const std::vector< Message > & messages)  [virtual]
Generate a completion from a list of messages.
Parameters
    messages | The messages to generate the completion from
Returns
    The LLM response
◆ complete() [2/2]
virtual LLMResponse agents::LLMInterface::complete (const String & prompt)
Generate a completion from a single prompt string.
Parameters
    prompt | The prompt to generate the completion from
Returns
    The LLM response
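The prompt overload is the simplest entry point. A sketch, assuming String is constructible from a string literal and LLMResponse exposes a content field (both assumptions):

#include <llm_interface.h>
#include <iostream>

void quickCompletion(agents::LLMInterface& llm) {
    // Single-turn call with no explicit conversation history.
    agents::LLMResponse response = llm.complete("Translate 'bonjour' to English.");
    std::cout << response.content << "\n";  // `content` is an assumed field name
}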
◆ completeAsync() [1/2]
virtual Task< LLMResponse > agents::LLMInterface::completeAsync (const std::vector< Message > & messages)  [virtual]
Asynchronously generate a completion from a list of messages.
Parameters
    messages | The messages to generate the completion from
Returns
    A Task that resolves to the LLM response
◆ completeAsync() [2/2]
virtual Task< LLMResponse > agents::LLMInterface::completeAsync (const String & prompt)  [virtual]
Asynchronously generate a completion from a single prompt string.
Parameters
    prompt | The prompt to generate the completion from
Returns
    A Task that resolves to the LLM response
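A sketch of the async prompt overload, again assuming Task< T > is co_await-able:

#include <llm_interface.h>
#include <iostream>

agents::Task<void> quickCompletionAsync(agents::LLMInterface& llm) {
    // Same single-turn call as complete(), but non-blocking.
    agents::LLMResponse response =
        co_await llm.completeAsync("Translate 'bonjour' to English.");
    std::cout << response.content << "\n";  // `content` is an assumed field name
}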
◆ completeWithTools()
virtual LLMResponse agents::LLMInterface::completeWithTools (const std::vector< Message > & messages, const std::vector< JsonObject > & tools_schema)  [virtual]
Generate a completion with tools described as raw JSON schemas rather than Tool objects.
Parameters
    messages | The messages to generate the completion from
    tools_schema | The JSON tool schemas to make available to the model
Returns
    The LLM response
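A sketch that passes pre-built schemas through; the internal shape of JsonObject (for example, OpenAI-style name/description/parameters keys) is an assumption not confirmed by this page:

#include <llm_interface.h>
#include <vector>

void completeWithSchemas(agents::LLMInterface& llm,
                         const std::vector<agents::JsonObject>& tools_schema) {
    std::vector<agents::Message> messages = {
        {"user", "Look up the current UTC time."}
    };
    // Schemas are forwarded to the provider; the model decides whether
    // to call one of the described tools.
    agents::LLMResponse response = llm.completeWithTools(messages, tools_schema);
}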
◆ getAvailableModels()
virtual std::vector< String > agents::LLMInterface::getAvailableModels ()  [pure virtual]
Get available models from this provider.
Returns
    The available models
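Listing models could look like this (assuming agents::String streams to std::ostream, which this page does not confirm):

#include <llm_interface.h>
#include <iostream>

void listModels(agents::LLMInterface& llm) {
    // Enumerate whatever models the configured provider reports.
    for (const agents::String& model : llm.getAvailableModels()) {
        std::cout << model << "\n";
    }
}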
◆ getModel()
virtual String agents::LLMInterface::getModel () const  [pure virtual]
Get the current model.
Returns
    The current model
◆ getOptions()
virtual LLMOptions agents::LLMInterface::getOptions () const  [pure virtual]
Get the current options.
Returns
    The current options
◆ setApiBase()
virtual void agents::LLMInterface::setApiBase (const String & api_base)  [pure virtual]
Set the API base URL (for self-hosted or proxied endpoints).
Parameters
    api_base | The API base URL to use
◆ setApiKey()
virtual void agents::LLMInterface::setApiKey (const String & api_key)  [pure virtual]
Set the API key.
Parameters
    api_key | The API key to use
◆ setModel()
virtual void agents::LLMInterface::setModel (const String & model)  [pure virtual]
Set the model to use.
Parameters
    model | The model to use
◆ setOptions()
virtual void agents::LLMInterface::setOptions (const LLMOptions & options)  [pure virtual]
Set options for API calls.
Parameters
    options | The options to use
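Taken together, the setters configure a provider before its first call. A sketch, assuming String is constructible from const char*, and using a local Ollama-style endpoint and model name purely as illustration:

#include <llm_interface.h>
#include <cstdlib>

void configureProvider(agents::LLMInterface& llm) {
    // Read the key from the environment rather than hard-coding it.
    if (const char* key = std::getenv("LLM_API_KEY")) {
        llm.setApiKey(key);
    }
    llm.setApiBase("http://localhost:11434/v1");  // illustrative self-hosted endpoint
    llm.setModel("llama3");                       // illustrative model name
    agents::LLMOptions options = llm.getOptions();
    // LLMOptions' fields are not documented on this page, so we only
    // re-apply the current options unchanged.
    llm.setOptions(options);
}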
◆ streamChat()
virtual void agents::LLMInterface::streamChat (const std::vector< Message > & messages, std::function< void(const String &, bool)> callback)  [pure virtual]
Stream the completion, invoking the callback as results arrive.
Parameters
    messages | The messages to generate the completion from
    callback | The callback invoked with each streamed chunk
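A streaming sketch; interpreting the callback's bool as an end-of-stream flag is an assumption, since this page does not document the second parameter:

#include <llm_interface.h>
#include <iostream>
#include <vector>

void streamToStdout(agents::LLMInterface& llm) {
    std::vector<agents::Message> messages = {
        {"user", "Write a haiku about compilers."}
    };
    llm.streamChat(messages, [](const agents::String& chunk, bool done) {
        std::cout << chunk;           // print each chunk as it arrives
        if (done) std::cout << "\n";  // treating `done` as end-of-stream is assumed
    });
}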
◆ streamChatAsync()
virtual AsyncGenerator< String > agents::LLMInterface::streamChatAsync (const std::vector< Message > & messages)  [virtual]
Stream chat results as an AsyncGenerator of chunks.
Parameters
    messages | The messages to generate the completion from
Returns
    An AsyncGenerator yielding response chunks
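A consuming sketch; AsyncGenerator's iteration protocol is not documented here, so the next() call below is hypothetical:

#include <llm_interface.h>
#include <iostream>
#include <vector>

agents::Task<void> streamAsync(agents::LLMInterface& llm) {
    std::vector<agents::Message> messages = {
        {"user", "Stream a short limerick."}
    };
    auto gen = llm.streamChatAsync(messages);
    // Hypothetical awaitable iteration: next() returning an optional chunk.
    while (auto chunk = co_await gen.next()) {
        std::cout << *chunk;
    }
}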