Agents 0.0.2
Edge AI Agents SDK
actor_agent_example.cpp

Actor Agent Example

This example demonstrates three usage patterns of the SDK: a prompt chaining workflow, a parallelization workflow with the sectioning strategy, and an actor agent that answers tasks with the help of registered calculator and weather tools.

Version: 0.1
Date: 2025-07-20
#include <agents-cpp/agent_context.h>
#include <agents-cpp/agents/actor_agent.h>
#include <agents-cpp/config_loader.h>
#include <agents-cpp/llm_interface.h>
#include <agents-cpp/logger.h>
#include <agents-cpp/tool.h>
#include <agents-cpp/workflows/parallelization_workflow.h>
#include <agents-cpp/workflows/prompt_chaining_workflow.h>
#include <chrono>
#include <iostream>
#include <memory>
#include <thread>
#include <vector>
// Specific using declarations instead of a blanket `using namespace` directive.
// NOTE: the `agents` namespace name is an assumption here; adjust these to
// match the namespace actually declared by the agents-cpp headers.
using agents::ActorAgent;
using agents::Agent;
using agents::AgentContext;
using agents::ConfigLoader;
using agents::JsonObject;
using agents::LLMOptions;
using agents::Logger;
using agents::ParallelizationWorkflow;
using agents::PromptChainingWorkflow;
using agents::String;
using agents::ToolResult;
using agents::blockingWait;
using agents::createLLM;
using agents::createTool;
// Example tool: Calculator
ToolResult calculatorTool(const JsonObject& params) {
    try {
        if (params.contains("expression")) {
            String expr = params["expression"];
            // Very simple calculator for demo purposes; a real-world
            // implementation would use a proper expression evaluator
            double result = 0.0;
            if (expr == "1+1") {
                result = 2.0;
            } else if (expr == "2*3") {
                result = 6.0;
            } else {
                // Default response for any other input
                result = 42.0;
            }
            return {
                true,
                "Calculated result: " + std::to_string(result),
                {{"result", result}}
            };
        } else {
            return {
                false,
                "Missing expression parameter",
                {{"error", "Missing expression parameter"}}
            };
        }
    } catch (const std::exception& e) {
        return {
            false,
            "Error calculating result: " + String(e.what()),
            {{"error", e.what()}}
        };
    }
}
// Example tool: Weather
ToolResult weatherTool(const JsonObject& params) {
    try {
        if (params.contains("location")) {
            String location = params["location"];
            // Just a dummy implementation for demo purposes
            String weather = "sunny";
            double temperature = 22.0;
            return {
                true,
                "Weather in " + location + ": " + weather + ", " + std::to_string(temperature) + "°C",
                {
                    {"location", location},
                    {"weather", weather},
                    {"temperature", temperature}
                }
            };
        } else {
            return {
                false,
                "Missing location parameter",
                {{"error", "Missing location parameter"}}
            };
        }
    } catch (const std::exception& e) {
        return {
            false,
            "Error getting weather: " + String(e.what()),
            {{"error", e.what()}}
        };
    }
}
int main(int argc, char* argv[]) {
    // Set up logging
    Logger::setLevel(Logger::Level::DEBUG);

    // Get API key from .env, environment, or command line
    String api_key;
    auto& config = ConfigLoader::getInstance();

    // Try to get API key from config or environment
    api_key = config.get("GEMINI_API_KEY", "");

    // If not found, check command line
    if (api_key.empty() && argc > 1) {
        api_key = argv[1];
    }

    // Still not found: show an error and exit
    if (api_key.empty()) {
        Logger::error("API key not found. Please:");
        Logger::error("1. Create a .env file with GEMINI_API_KEY=your_key, or");
        Logger::error("2. Set the GEMINI_API_KEY environment variable, or");
        Logger::error("3. Provide an API key as a command line argument");
        return 1;
    }

    try {
        // Create LLM interface
        auto llm = createLLM("google", api_key, "gemini-1.5-flash");

        // Set up options
        LLMOptions options;
        options.temperature = 0.7;
        options.max_tokens = 1000;
        llm->setOptions(options);

        // Create tools
        auto calculator = createTool(
            "calculator",
            "Calculate mathematical expressions",
            {
                {"expression", "string", "The mathematical expression to calculate", true}
            },
            calculatorTool
        );
        auto weather = createTool(
            "weather",
            "Get weather information for a location",
            {
                {"location", "string", "The location to get weather for", true}
            },
            weatherTool
        );

        // Create agent context
        auto context = std::make_shared<AgentContext>();
        context->setLLM(llm);
        context->registerTool(calculator);
        context->registerTool(weather);
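
        // Every workflow and agent below is constructed with this shared
        // context, so they all draw on the same LLM and registered tools.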

        // Example 1: Using the prompt chaining workflow
        std::cout << "\n=== Example 1: Prompt Chaining Workflow ===\n\n";
        auto chaining_workflow = std::make_shared<PromptChainingWorkflow>(context);

        // Add steps to the workflow; the {{response}} placeholder in each
        // prompt carries the previous step's output forward
        chaining_workflow->addStep(
            "brainstorm",
            "Brainstorm 3 creative ideas for a short story about space exploration. Return them as a JSON array."
        );
        chaining_workflow->addStep(
            "select",
            "From these ideas, select the most interesting one and explain why you chose it:\n{{response}}"
        );
        chaining_workflow->addStep(
            "outline",
            "Create a brief outline for a story based on this idea:\n{{response}}"
        );

        // Execute the workflow
        auto result = chaining_workflow->run();
        std::cout << "Prompt chaining result: " << result.dump(2) << "\n\n";

        // Example 2: Using the parallelization workflow
        std::cout << "\n=== Example 2: Parallelization Workflow (Sectioning) ===\n\n";
        auto parallel_workflow = std::make_shared<ParallelizationWorkflow>(
            context, ParallelizationWorkflow::Strategy::SECTIONING
        );

        // Add tasks to the workflow
        parallel_workflow->addTask(
            "characters",
            "Create 2 interesting characters for a sci-fi story set on Mars."
        );
        parallel_workflow->addTask(
            "setting",
            "Describe the environment and setting of a Mars colony in the year 2150."
        );
        parallel_workflow->addTask(
            "plot",
            "Create a plot outline for a mystery story set on Mars."
        );

        // Initialize and execute the workflow
        parallel_workflow->init();
        result = parallel_workflow->run();
        std::cout << "Parallelization result: " << result.dump(2) << "\n\n";

        // Example 3: Using the actor agent
        std::cout << "\n=== Example 3: Actor Agent with Tools ===\n\n";
        auto agent = std::make_shared<ActorAgent>(context);

        // Set system prompt
        agent->setSystemPrompt(
            "You are a helpful assistant that can answer questions and use tools to get information. "
            "When using tools, make sure to include all necessary parameters."
        );

        // Set options
        Agent::Options agent_options;
        agent_options.max_iterations = 5;
        agent_options.human_feedback_enabled = false;
        agent->setOptions(agent_options);

        // Register status callback
        agent->setStatusCallback([](const String& status) {
            std::cout << "Agent status: " << status << "\n";
        });

        // Initialize the agent
        agent->init();

        // Run the agent with multiple tasks
        std::vector<String> tasks = {
            "What is 1+1?",
            "What's the weather like in New York?",
            "Tell me a short story about a robot learning to feel emotions."
        };
        for (const auto& task : tasks) {
            std::cout << "\nTask: " << task << "\n";
            result = blockingWait(agent->run(task));
            std::cout << "Result: " << result.dump(2) << "\n";

            // Small delay between tasks
            std::this_thread::sleep_for(std::chrono::seconds(1));
        }

        return 0;
    } catch (const std::exception& e) {
        std::cerr << "Error: " << e.what() << "\n";
        return 1;
    }
}
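To try the example, supply a Gemini API key through a .env file, the GEMINI_API_KEY environment variable, or as the first command-line argument (e.g. ./actor_agent_example <your-key>, assuming the built binary keeps the source file's name).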
static ConfigLoader & getInstance()
Get the singleton instance of ConfigLoader.
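Mirroring its use in the listing above:

  auto& config = ConfigLoader::getInstance();
  String api_key = config.get("GEMINI_API_KEY", "");  // second argument is the fallback value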
static void error(fmt::format_string< Args... > fmt, Args &&... args)
Log a message at error level.
Definition logger.h:124
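The fmt-style signature indicates format placeholders are supported; a hedged sketch (task_count is a hypothetical variable):

  Logger::error("Aborting after {} failed tasks", task_count);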
static void setLevel(Level level)
Set the log level.
Actor-based agent implementation.
Definition actor_agent.h:31
Context for an agent, containing tools, LLM, and memory.
Definition agent_context.h:30
Interface for agents.
Definition agent.h:26
A utility class to load and access configuration values from .env files.
Definition config_loader.h:24
Interface for language model providers (OpenAI, Anthropic, Google, Ollama)
Definition llm_interface.h:68
Logger utility class that wraps spdlog functionality.
Definition logger.h:23
Interface for tools that an agent can use.
Definition tool.h:49
A workflow that runs multiple tasks in parallel.
Definition parallelization_workflow.h:27
A workflow that chains multiple prompts together.
Definition prompt_chaining_workflow.h:26
nlohmann::json JsonObject
JSON object type.
Definition types.h:39
std::shared_ptr< LLMInterface > createLLM(const String &provider, const String &api_key, const String &model="")
Factory function to create a specific LLM provider.
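As in the listing above:

  auto llm = createLLM("google", api_key, "gemini-1.5-flash");

Only the "google" provider string is confirmed by this example; identifiers for the other providers presumably follow the same lowercase convention, but that is an assumption.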
std::string String
String type.
Definition types.h:27
std::shared_ptr< Tool > createTool(const String &name, const String &description, const std::vector< Parameter > &parameters, ToolCallback callback)
Create a custom tool with a name, description, parameters, and callback.
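A minimal sketch of a custom tool following the pattern in the listing, where each parameter is declared as {name, type, description, required}. The echo tool is hypothetical, and passing a lambda assumes ToolCallback is a std::function-style callable type:

  auto echo = createTool(
      "echo",
      "Return the given text unchanged",
      {
          {"text", "string", "The text to echo back", true}
      },
      [](const JsonObject& params) -> ToolResult {
          String text = params.contains("text") ? params["text"].get<String>() : "";
          return {true, "Echo: " + text, {{"text", text}}};
      }
  );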
T blockingWait(Task< T > &&task)
Helper to run a task and get the result synchronously.
Definition coroutine_utils.h:169
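In the listing above it bridges the agent's asynchronous run() into synchronous code:

  result = blockingWait(agent->run(task));  // blocks until the coroutine completes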
Agent execution options.
Definition agent.h:61
bool human_feedback_enabled
Whether human feedback is enabled.
Definition agent.h:75
int max_iterations
The maximum number of iterations.
Definition agent.h:65
Options for LLM API calls.
Definition llm_interface.h:25
double temperature
The temperature of the LLM.
Definition llm_interface.h:29
int max_tokens
The maximum number of tokens.
Definition llm_interface.h:33
Result of a tool execution.
Definition tool.h:22
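The example tools build it with aggregate initialization; judging from that usage, the three fields are a success flag, a human-readable message, and a JSON payload (the exact member names are declared in tool.h):

  return {true, "Calculated result: 6.000000", {{"result", 6.0}}};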
Response from an LLM.
Definition types.h:85
Message in a conversation.
Definition types.h:105