Agents 0.0.2
Edge AI Agents SDK
|
Interface for language model providers (OpenAI, Anthropic, Google, Ollama) More...
#include <llm_interface.h>
Public Member Functions | |
virtual | ~LLMInterface ()=default |
Destructor. | |
virtual std::vector< String > | getAvailableModels ()=0 |
Get available models from this provider. | |
virtual void | setModel (const String &model)=0 |
Set the model to use. | |
virtual String | getModel () const =0 |
Get current model. | |
virtual void | setApiKey (const String &api_key)=0 |
Set API key. | |
virtual void | setApiBase (const String &api_base)=0 |
Set API base URL (for self-hosted or proxied endpoints). | |
virtual void | setOptions (const LLMOptions &options)=0 |
Set options for API calls. | |
virtual LLMOptions | getOptions () const =0 |
Get current options. | |
virtual LLMResponse | complete (const String &prompt) |
Generate completion from a prompt. | |
virtual LLMResponse | complete (const std::vector< Message > &messages) |
Generate completion from a list of messages. | |
virtual LLMResponse | completeWithTools (const std::vector< Message > &messages, const std::vector< JsonObject > &tools_schema) |
Generate completion with available tools. | |
virtual LLMResponse | chat (const std::vector< Message > &messages)=0 |
Generate a chat completion from a list of messages (primary chat entry point; distinct from complete()). | |
virtual LLMResponse | chatWithTools (const std::vector< Message > &messages, const std::vector< std::shared_ptr< Tool > > &tools)=0 |
Generate completion with available tools. | |
virtual void | streamChat (const std::vector< Message > &messages, std::function< void(const String &, bool)> callback)=0 |
Stream chat results incrementally via a callback. | |
virtual Task< LLMResponse > | completeAsync (const String &prompt) |
Async complete from a prompt. | |
virtual Task< LLMResponse > | completeAsync (const std::vector< Message > &messages) |
Async complete from a list of messages. | |
virtual Task< LLMResponse > | chatAsync (const std::vector< Message > &messages) |
Async chat from a list of messages. | |
virtual Task< LLMResponse > | chatWithToolsAsync (const std::vector< Message > &messages, const std::vector< std::shared_ptr< Tool > > &tools) |
Async chat with tools. | |
virtual AsyncGenerator< String > | streamChatAsync (const std::vector< Message > &messages) |
Stream chat with AsyncGenerator. | |
Interface for language model providers (OpenAI, Anthropic, Google, Ollama)
|
pure virtual |
Generate a chat completion from a list of messages (primary chat entry point; distinct from complete()).
messages | The messages to generate the completion from |
Implemented in agents::llms::AnthropicLLM, agents::llms::GoogleLLM, agents::llms::OllamaLLM, and agents::llms::OpenAILLM.
|
virtual |
Async chat from a list of messages.
messages | The messages to generate completion from |
|
pure virtual |
Generate completion with available tools.
messages | The messages to generate completion from |
tools | The tools to use |
Implemented in agents::llms::AnthropicLLM, agents::llms::GoogleLLM, agents::llms::OllamaLLM, and agents::llms::OpenAILLM.
|
virtual |
Async chat with tools.
messages | The messages to generate completion from |
tools | The tools to use |
|
virtual |
Generate completion from a list of messages.
messages | The messages to generate completion from |
|
virtual |
Generate completion from a prompt.
prompt | The prompt to generate completion from |
Reimplemented in agents::llms::AnthropicLLM, agents::llms::GoogleLLM, agents::llms::OllamaLLM, and agents::llms::OpenAILLM.
|
virtual |
Async complete from a list of messages.
messages | The messages to generate completion from |
|
virtual |
Async complete from a prompt.
prompt | The prompt to generate completion from |
|
virtual |
Generate completion with available tools.
messages | The messages to generate completion from |
tools_schema | JSON schema objects describing the tools made available to the model |
|
pure virtual |
Get available models from this provider.
Implemented in agents::llms::AnthropicLLM, agents::llms::GoogleLLM, agents::llms::OllamaLLM, and agents::llms::OpenAILLM.
|
pure virtual |
Get current model.
Implemented in agents::llms::AnthropicLLM, agents::llms::GoogleLLM, agents::llms::OllamaLLM, and agents::llms::OpenAILLM.
|
pure virtual |
Get current options.
Implemented in agents::llms::AnthropicLLM, agents::llms::GoogleLLM, agents::llms::OllamaLLM, and agents::llms::OpenAILLM.
|
pure virtual |
Set API base URL (for self-hosted or proxied endpoints).
api_base | The API base URL to use |
Implemented in agents::llms::AnthropicLLM, agents::llms::GoogleLLM, agents::llms::OllamaLLM, and agents::llms::OpenAILLM.
|
pure virtual |
Set API key.
api_key | The API key to use |
Implemented in agents::llms::AnthropicLLM, agents::llms::GoogleLLM, agents::llms::OllamaLLM, and agents::llms::OpenAILLM.
|
pure virtual |
Set the model to use.
model | The model to use |
Implemented in agents::llms::AnthropicLLM, agents::llms::GoogleLLM, agents::llms::OllamaLLM, and agents::llms::OpenAILLM.
|
pure virtual |
Set options for API calls.
options | The options to use |
Implemented in agents::llms::AnthropicLLM, agents::llms::GoogleLLM, agents::llms::OllamaLLM, and agents::llms::OpenAILLM.
|
pure virtual |
Stream chat results incrementally via a callback.
messages | The messages to generate the completion from |
callback | Invoked for each streamed chunk with the chunk text and a boolean status flag |
Implemented in agents::llms::AnthropicLLM, agents::llms::GoogleLLM, agents::llms::OllamaLLM, and agents::llms::OpenAILLM.
|
virtual |
Stream chat with AsyncGenerator.
messages | The messages to generate completion from |