#include <agents-cpp/types.h>
#include <agents-cpp/tool.h>
#include <agents-cpp/coroutine_utils.h>
A minimal AsyncGenerator implementation that doesn't rely on coroutines.
Definition coroutine_utils.h:142
Interface for language model providers (OpenAI, Anthropic, Google, Ollama).
Definition llm_interface.h:68
virtual LLMResponse chat(const std::vector< Message > &messages)=0
Generate a chat completion from a list of messages.
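A minimal usage sketch for the synchronous chat path. How Message instances are constructed is defined in types.h, so the helper below simply takes an existing conversation; the helper name and the header path are illustrative assumptions, not part of the documented API.

```cpp
#include <agents-cpp/llm_interface.h>  // assumed header path, matching the includes above
#include <vector>

// Illustrative helper (not part of the API): send an existing
// conversation to the provider and return the full response.
// Assumes the framework namespace (agent.h:18) is in scope.
LLMResponse askOnce(LLMInterface& llm, const std::vector<Message>& messages) {
    return llm.chat(messages);
}
```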
virtual void setOptions(const LLMOptions &options)=0
Set options for API calls.
virtual ~LLMInterface()=default
Destructor.
virtual AsyncGenerator< String > streamChatAsync(const std::vector< Message > &messages)
Stream chat with AsyncGenerator.
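A hedged sketch of consuming the returned generator. The iteration API shown (next() yielding std::optional<String>) is an assumption about the minimal, coroutine-free AsyncGenerator in coroutine_utils.h:142; consult that header for the real interface.

```cpp
#include <agents-cpp/llm_interface.h>  // assumed header path
#include <iostream>
#include <vector>

// Illustrative helper: drain the stream and print each chunk.
// ASSUMPTION: AsyncGenerator<String> exposes next() returning
// std::optional<String>; the real API lives in coroutine_utils.h:142.
void printStream(LLMInterface& llm, const std::vector<Message>& messages) {
    auto gen = llm.streamChatAsync(messages);
    while (auto chunk = gen.next()) {  // assumed iteration API
        std::cout << *chunk;
    }
    std::cout << std::endl;
}
```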
virtual void setModel(const String &model)=0
Set the model to use.
virtual void setApiKey(const String &api_key)=0
Set API key.
virtual Task< LLMResponse > completeAsync(const std::vector< Message > &messages)
Async complete from a list of messages.
virtual std::vector< String > getAvailableModels()=0
Get available models from this provider.
virtual LLMOptions getOptions() const =0
Get current options.
virtual Task< LLMResponse > completeAsync(const String &prompt)
Async complete from a prompt.
virtual void setApiBase(const String &api_base)=0
Set API base URL (for self-hosted or proxied endpoints).
virtual LLMResponse complete(const std::vector< Message > &messages)
Generate completion from a list of messages.
virtual LLMResponse completeWithTools(const std::vector< Message > &messages, const std::vector< JsonObject > &tools_schema)
Generate completion with available tools, described by JSON schemas.
virtual LLMResponse complete(const String &prompt)
Generate completion from a prompt.
virtual LLMResponse chatWithTools(const std::vector< Message > &messages, const std::vector< std::shared_ptr< Tool > > &tools)=0
Generate a chat completion with the supplied Tool objects available.
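A sketch of the tool-augmented path. Tool construction belongs to tool.h, so the helper accepts ready-made instances; only the helper name is illustrative.

```cpp
#include <agents-cpp/llm_interface.h>  // assumed header path
#include <agents-cpp/tool.h>
#include <memory>
#include <vector>

// Illustrative helper: let the model decide whether to invoke one of
// the supplied tools. Tool objects are built elsewhere via tool.h.
LLMResponse chatWithToolbox(
    LLMInterface& llm,
    const std::vector<Message>& messages,
    const std::vector<std::shared_ptr<Tool>>& tools
) {
    return llm.chatWithTools(messages, tools);
}
```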
virtual void streamChat(const std::vector< Message > &messages, std::function< void(const String &, bool)> callback)=0
Stream results with a callback.
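The callback signature is given above; a sketch that prints chunks as they arrive follows. Interpreting the bool as an end-of-stream flag is an assumption.

```cpp
#include <agents-cpp/llm_interface.h>  // assumed header path
#include <iostream>
#include <vector>

// Illustrative helper: forward streamed chunks to stdout.
// ASSUMPTION: the bool parameter signals that the stream has finished.
void streamToStdout(LLMInterface& llm, const std::vector<Message>& messages) {
    llm.streamChat(messages, [](const String& chunk, bool done) {
        std::cout << chunk;
        if (done) {
            std::cout << std::endl;
        }
    });
}
```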
virtual String getModel() const =0
Get current model.
virtual Task< LLMResponse > chatAsync(const std::vector< Message > &messages)
Async chat from a list of messages.
virtual Task< LLMResponse > chatWithToolsAsync(const std::vector< Message > &messages, const std::vector< std::shared_ptr< Tool > > &tools)
Async chat with tools.
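Since Task is documented as a future-based fallback (coroutine_utils.h:115), a blocking retrieval along the lines of std::future::get() is assumed in this sketch; the actual accessor may differ.

```cpp
#include <agents-cpp/llm_interface.h>  // assumed header path
#include <memory>
#include <vector>

// Illustrative helper: launch the async call, then block for the result.
// ASSUMPTION: Task<LLMResponse> exposes a std::future-style get();
// see coroutine_utils.h:115 for the actual interface.
LLMResponse chatWithToolsBlocking(
    LLMInterface& llm,
    const std::vector<Message>& messages,
    const std::vector<std::shared_ptr<Tool>>& tools
) {
    auto task = llm.chatWithToolsAsync(messages, tools);
    return task.get();  // assumed retrieval method
}
```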
A future-based fallback for Task.
Definition coroutine_utils.h:115
Framework Namespace.
Definition agent.h:18
std::shared_ptr< LLMInterface > createLLM(const String &provider, const String &api_key, const String &model="")
Factory function to create a specific LLM provider.
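A sketch of the factory path. The provider string "openai" and the model id are illustrative guesses based on the providers named above, not a documented list; check the provider implementations for valid values.

```cpp
#include <agents-cpp/llm_interface.h>  // assumed header path
#include <memory>

// Illustrative setup: create a provider, optionally point it at a
// proxy, and run a one-shot completion. Provider and model strings
// are ASSUMPTIONS, as is the proxy URL.
std::shared_ptr<LLMInterface> makeClient(const String& api_key) {
    auto llm = createLLM("openai", api_key, "gpt-4o-mini" /* assumed id */);
    llm->setApiBase("https://llm-proxy.example.com/v1");  // optional
    LLMResponse greeting = llm->complete("Say hello.");
    return llm;
}
```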
std::string String
String type.
Definition types.h:27
Options for LLM API calls.
Definition llm_interface.h:25
double presence_penalty
Presence penalty applied during sampling.
Definition llm_interface.h:41
std::optional< String > response_mime_type
Response MIME type for structured output.
Definition llm_interface.h:62
double top_p
Nucleus sampling (top-p) threshold.
Definition llm_interface.h:37
double temperature
Sampling temperature.
Definition llm_interface.h:29
int max_tokens
Maximum number of tokens to generate.
Definition llm_interface.h:33
std::optional< JsonObject > response_schema
Response schema for structured output (JSON Schema).
Definition llm_interface.h:57
std::vector< String > stop_sequences
Sequences at which generation stops.
Definition llm_interface.h:53
double frequency_penalty
Frequency penalty applied during sampling.
Definition llm_interface.h:45
int timeout_ms
Request timeout in milliseconds.
Definition llm_interface.h:49
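The fields above map directly onto a configuration call via setOptions()/getOptions(); a sketch with illustrative values follows (defaults and valid ranges are defined in llm_interface.h).

```cpp
#include <agents-cpp/llm_interface.h>  // assumed header path

// Illustrative configuration: field names match the struct above,
// the values are arbitrary examples. Starting from getOptions()
// preserves whatever defaults the provider set.
void configure(LLMInterface& llm) {
    LLMOptions opts = llm.getOptions();
    opts.temperature = 0.2;
    opts.top_p = 0.9;
    opts.max_tokens = 1024;
    opts.presence_penalty = 0.0;
    opts.frequency_penalty = 0.0;
    opts.timeout_ms = 30000;
    opts.stop_sequences = {"\n\nUser:"};  // illustrative stop sequence
    llm.setOptions(opts);
}
```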
Response from an LLM.
Definition types.h:85