#include <agents-cpp/agents/actor_agent.h>
#include <agents-cpp/config_loader.h>
#include <agents-cpp/logger.h>
#include <agents-cpp/tool.h>
#include <agents-cpp/workflows/parallelization_workflow.h>
#include <agents-cpp/workflows/prompt_chaining_workflow.h>

#include <chrono>
#include <iostream>
#include <thread>  // for std::this_thread::sleep_for
#include <vector>

// Namespace names are assumed from the framework and workflow headers.
using namespace agents;
using namespace agents::workflows;
// Calculator tool callback. The signature is an assumption based on the
// ToolCallback type expected by createTool() in tool.h.
ToolResult calculatorTool(const JsonObject& params) {
    try {
        if (params.contains("expression")) {
            String expr = params["expression"];
            double result = 0.0;
            // Toy evaluator: only a few fixed expressions are recognized;
            // everything else falls through to a placeholder value.
            if (expr == "1+1") {
                result = 2.0;
            } else if (expr == "2*3") {
                result = 6.0;
            } else {
                result = 42.0;
            }
            return {
                true,
                "Calculated result: " + std::to_string(result),
                {{"result", result}}
            };
        } else {
            return {
                false,
                "Missing expression parameter",
                {{"error", "Missing expression parameter"}}
            };
        }
    } catch (const std::exception& e) {
        return {
            false,
            "Error calculating result: " + String(e.what()),
            {{"error", e.what()}}
        };
    }
}
// Weather tool callback: returns canned data for any location. The
// "Sunny" value is a stub; a real tool would query a weather API.
ToolResult weatherTool(const JsonObject& params) {
    try {
        if (params.contains("location")) {
            String location = params["location"];
            String weather = "Sunny";
            double temperature = 22.0;
            return {
                true,
                "Weather in " + location + ": " + weather + ", " + std::to_string(temperature) + "°C",
                {
                    {"location", location},
                    {"weather", weather},
                    {"temperature", temperature}
                }
            };
        } else {
            return {
                false,
                "Missing location parameter",
                {{"error", "Missing location parameter"}}
            };
        }
    } catch (const std::exception& e) {
        return {
            false,
            "Error getting weather: " + String(e.what()),
            {{"error", e.what()}}
        };
    }
}
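
// Entry point: loads the API key, wires up the LLM, tools, and shared
// context, then runs the three examples in sequence.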
int main(int argc, char* argv[]) {
    Logger::setLevel(Logger::Level::INFO);

    // Look for the API key in the config/.env first, then argv.
    auto& config = ConfigLoader::getInstance();
    String api_key = config.get("GEMINI_API_KEY", "");
    if (api_key.empty() && argc > 1) {
        api_key = argv[1];
    }
    if (api_key.empty()) {
        Logger::error("No Gemini API key found. Either:");
        Logger::error("1. Create a .env file with GEMINI_API_KEY=your_key, or");
        Logger::error("2. Set the GEMINI_API_KEY environment variable, or");
        Logger::error("3. Provide an API key as a command line argument");
        return EXIT_FAILURE;
    }
    try {
        // Create the Google Gemini LLM interface.
        auto llm = createLLM("google", api_key, "gemini-1.5-flash");

        // LLM call options; the struct name and values here are assumptions
        // based on the "Options for LLM API calls" type in llm_interface.h.
        LLMOptions options;
        options.temperature = 0.7;
        options.max_tokens = 1024;
        llm->setOptions(options);

        // Register the two custom tools defined above.
        auto calculator = createTool(
            "calculator",
            "Calculate mathematical expressions",
            {
                {"expression", "The mathematical expression to calculate", "string", true}
            },
            calculatorTool
        );
        auto weather = createTool(
            "weather",
            "Get weather information for a location",
            {
                {"location", "The location to get weather for", "string", true}
            },
            weatherTool
        );

        // Shared context exposes the LLM and tools to workflows and agents.
        auto context = std::make_shared<AgentContext>();
        context->setLLM(llm);
        context->registerTool(calculator);
        context->registerTool(weather);
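
        // --- Example 1 ---
        // Prompt chaining: each step's LLM output is substituted into the
        // {{response}} placeholder of the next step's prompt, forming a
        // brainstorm -> select -> outline pipeline.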
std::cout << "\n=== Example 1: Prompt Chaining Workflow ===\n\n";
auto chaining_workflow = std::make_shared<PromptChainingWorkflow>(context);
chaining_workflow->addStep(
"brainstorm",
"Brainstorm 3 creative ideas for a short story about space exploration. Return them as a JSON array."
);
chaining_workflow->addStep(
"select",
"From these ideas, select the most interesting one and explain why you chose it:\n{{response}}"
);
chaining_workflow->addStep(
"outline",
"Create a brief outline for a story based on this idea:\n{{response}}"
);
        // run() returns a coroutine Task; blockingWait (coroutine_utils.h)
        // resolves it synchronously.
        auto result = blockingWait(chaining_workflow->run());
        std::cout << "Prompt chaining result: " << result.dump(2) << "\n\n";
std::cout << "\n=== Example 2: Parallelization Workflow (Sectioning) ===\n\n";
auto parallel_workflow = std::make_shared<ParallelizationWorkflow>(
);
parallel_workflow->addTask(
"characters",
"Create 2 interesting characters for a sci-fi story set on Mars."
);
parallel_workflow->addTask(
"setting",
"Describe the environment and setting of a Mars colony in the year 2150."
);
parallel_workflow->addTask(
"plot",
"Create a plot outline for a mystery story set on Mars."
);
parallel_workflow->init();
result = parallel_workflow->run();
std::cout << "Parallelization result: " << result.dump(2) << "\n\n";
std::cout << "\n=== Example 3: Actor Agent with Tools ===\n\n";
auto agent = std::make_shared<ActorAgent>(context);
agent->setAgentPrompt(
"You are a helpful assistant that can answer questions and use tools to get information. "
"When using tools, make sure to include all necessary parameters."
);
agent->setOptions(agent_options);
agent->setStatusCallback([](
const String& status) {
std::cout << "Agent status: " << status << "\n";
});
agent->init();
std::vector<String> tasks = {
"What is 1+1?",
"What's the weather like in New York?",
"Tell me a short story about a robot learning to feel emotions."
};
for (const auto& task : tasks) {
std::cout << "\nTask: " << task << "\n";
std::cout << "Result: " << result.dump(2) << "\n";
std::this_thread::sleep_for(std::chrono::seconds(1));
}
        return EXIT_SUCCESS;
    } catch (const std::exception& e) {
        std::cerr << "Error: " << e.what() << "\n";
        return EXIT_FAILURE;
    }
}