#include <agents-cpp/agents/autonomous_agent.h>
#include <agents-cpp/config_loader.h>
#include <agents-cpp/logger.h>
#include <agents-cpp/tools/tool_registry.h>
#include <chrono>
#include <iostream>
// NOTE(review): this listing is a Doxygen extraction and the opening of this
// function (its signature and the branches closed below) is missing from the
// visible text. Judging from the `human_in_the_loop` callback type declared in
// agent.h (bool(const String&, const JsonObject&, String&)), this fragment is
// presumably the tail of that approval callback — TODO confirm against the
// original example source.
} else {
}
}
if (!context.empty()) {
}
// Prompt the operator to approve, reject, or modify the pending agent step.
Logger::info(
"\nApprove this step? (y/n/m - y: approve, n: reject, m: modify): ");
char response;
std::cin >> response;
// Discard the newline left in the stream so the getline() below reads the
// next full line instead of an empty one.
std::cin.ignore();
if (response == 'm' || response == 'M') {
// 'm' = approve with modifications: capture the operator's edit text into
// the out-parameter and treat the step as approved.
std::getline(std::cin, user_modifications);
modifications = user_modifications;
return true;
}
// Only an explicit 'y'/'Y' approves; any other character rejects the step.
return (response == 'y' || response == 'Y');
}
// Entry point for the interactive autonomous-agent demo: choose an LLM
// provider, register a custom "summarize" tool, pick a planning strategy,
// then run a read-eval loop feeding user tasks to the agent.
// NOTE(review): this listing is a Doxygen extraction with many lines elided —
// the declarations of `config`, `options`, `user_modifications`, `agent`,
// `strategy`, `agent_options`, `result`, `user_input`, `summarize_tool`,
// `detailedStepCallback`, and the `createTool(...)` call header do not appear.
// Comments below describe only what the visible code demonstrates.
int main() {
// Select the LLM backend. ignore() drops the trailing newline so later
// std::getline() calls are not short-circuited by the leftover '\n'.
Logger::info(
"Select LLM provider (1 for OpenAI, 2 for Anthropic, 3 for Google): ");
int provider_choice;
std::cin >> provider_choice;
std::cin.ignore();
std::shared_ptr<LLMInterface> llm;
if (provider_choice == 1) {
// OpenAI: requires OPENAI_API_KEY in the loaded configuration
// (`config` is presumably a ConfigLoader — see getInstance() in the
// cross-references; declaration elided from this listing).
bool hasOpenAI = config.has("OPENAI_API_KEY");
if (hasOpenAI) {
llm =
createLLM(
"openai", config.get(
"OPENAI_API_KEY"),
"gpt-4o-2024-05-13");
} else {
// Missing API key: abort (any error logging is in elided lines).
return EXIT_FAILURE;
}
} else if (provider_choice == 2) {
// Anthropic: requires ANTHROPIC_API_KEY.
bool hasAnthropic = config.has("ANTHROPIC_API_KEY");
if (hasAnthropic) {
llm =
createLLM(
"anthropic", config.get(
"ANTHROPIC_API_KEY"),
"claude-3-5-sonnet-20240620");
} else {
return EXIT_FAILURE;
}
} else if (provider_choice == 3) {
// Google Gemini: requires GEMINI_API_KEY.
bool hasGoogle = config.has("GEMINI_API_KEY");
if (hasGoogle) {
llm =
createLLM(
"google", config.get(
"GEMINI_API_KEY"),
"gemini-2.0-flash");
} else {
return EXIT_FAILURE;
}
} else {
// Any other menu entry is an invalid choice.
return EXIT_FAILURE;
}
// `options` (an LLMOptions value per the cross-references) is declared in
// an elided line. Wire the LLM into a fresh agent context.
llm->setOptions(options);
auto context = std::make_shared<AgentContext>();
context->setLLM(llm);
// NOTE: these adjacent string literals concatenate without separating
// spaces, so the prompt reads "...tools.You can use..." — presumably
// unintended; verify against the original example.
context->setSystemPrompt(
"You are a helpful, autonomous assistant with access to tools."
"You can use these tools to accomplish tasks for the user."
"Think step by step and be thorough in your approach."
);
// Arguments to a createTool(...) call whose opening line is elided:
// tool name, description, parameter schema, then the callback body below.
"summarize",
"Summarizes a long piece of text into a concise summary",
{
{"text", "The text to summarize", "string", true},
{"max_length", "Maximum length of summary in words", "integer", false}
},
// Callback body (lambda header elided): optional max_length defaults to
// 100 words when the caller omits it.
int max_length = params.contains("max_length") ? params["max_length"].get<int>() : 100;
// Copy the agent context so the summarizer can use its own system prompt
// without disturbing the main agent's.
auto summary_context = std::make_shared<AgentContext>(*context);
summary_context->setSystemPrompt(
"You are a summarization assistant. Your task is to create concise, accurate summaries "
"that capture the main points of the provided text."
);
String prompt =
"Summarize the following text in no more than " +
std::to_string(max_length) +
" words:\n\n" +
text;
// Synchronous completion against the same LLM the user selected.
LLMResponse llm_response = summary_context->getLLM()->complete(prompt);
// Tail of the tool-result construction (success flag, content, and a
// JSON payload echoing prompt + summary); the opening brace is elided.
true,
summary,
{{"summary", prompt + "\n\n" + summary}}
};
}
);
context->registerTool(summarize_tool);
// Planning-strategy menu (option 1's line is elided; see the REACT /
// PLAN_AND_EXECUTE enum in the cross-references).
Logger::info(
"2. Plan-and-Execute (Complex, structured task)");
int strategy_choice;
std::cin >> strategy_choice;
std::cin.ignore();
// Assignments to `strategy` inside each case are elided; case 1 falls
// through to default in the visible text.
switch (strategy_choice) {
case 2:
break;
case 1:
default:
}
agent.setPlanningStrategy(strategy);
agent.setAgentPrompt(
"You are an advanced autonomous assistant capable of using tools to help users "
"accomplish their tasks. You break down complex problems into manageable steps "
"and execute them systematically. Always provide clear explanations of your "
"reasoning and approach."
);
// Ask whether to enable human-in-the-loop approval; the branch that acts
// on the answer is elided (only its closing brace remains below).
char human_loop_choice;
std::cin >> human_loop_choice;
std::cin.ignore();
}
agent.setOptions(agent_options);
agent.setStepCallback(detailedStepCallback);
agent.init();
Logger::info(
"\n==================================================");
Logger::info(
"==================================================");
Logger::info(
"Enter a question or task for the agent (or 'exit' to quit):");
// Interactive REPL: read one task per line until the user quits.
while (true) {
std::getline(std::cin, user_input);
if (user_input == "exit" || user_input == "quit" || user_input == "q") {
break;
}
if (user_input.empty()) {
continue;
}
try {
// The agent invocation between these two timestamps (presumably
// agent.run(user_input) via blockingWait) is elided, as is any use
// of `duration` in the elided log lines.
auto start_time = std::chrono::high_resolution_clock::now();
auto end_time = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::seconds>(end_time - start_time).count();
Logger::info(
"\n==================================================");
Logger::info(
"==================================================");
Logger::info(
"\n--------------------------------------------------");
// Summarize the run from the agent's JSON result.
Logger::info(
"Total steps: {}", result[
"steps"].get<JsonArray>().size());
if (result.contains("tool_calls")) {
Logger::info(
"Tool calls: {}", result[
"tool_calls"].get<int>());
}
Logger::info(
"==================================================");
} catch (const std::exception& e) {
// Error reporting for a failed run is in elided lines; the loop
// continues so one failure does not end the session.
}
}
return EXIT_SUCCESS;
}
An agent that operates autonomously to complete a task.
Definition autonomous_agent.h:28
PlanningStrategy
Planning strategy for the agent.
Definition autonomous_agent.h:55
@ PLAN_AND_EXECUTE
Generate a plan then execute it.
Definition autonomous_agent.h:67
@ REACT
Reasoning and acting.
Definition autonomous_agent.h:75
static ConfigLoader & getInstance()
Get the singleton instance of ConfigLoader.
@ INFO
Info logging level.
Definition logger.h:40
static void error(fmt::format_string< Args... > fmt, Args &&... args)
Log a message at error level.
Definition logger.h:124
static void init(Level level=Level::INFO)
Initialize the logger.
static void info(fmt::format_string< Args... > fmt, Args &&... args)
Log a message at info level.
Definition logger.h:104
Framework Namespace.
Definition agent.h:18
nlohmann::json JsonObject
JSON object type.
Definition types.h:39
std::shared_ptr< LLMInterface > createLLM(const String &provider, const String &api_key, const String &model="")
Factory function to create a specific LLM provider.
std::string String
String type.
Definition types.h:27
std::shared_ptr< Tool > createTool(const String &name, const String &description, const std::vector< Parameter > &parameters, ToolCallback callback)
Create a custom tool with a name, description, parameters, and callback.
T blockingWait(Task< T > &&task)
Helper to run a coroutine and get the result synchronously.
Definition coroutine_utils.h:496
Agent execution options.
Definition agent.h:61
bool human_feedback_enabled
Whether human feedback is enabled.
Definition agent.h:75
int max_iterations
The maximum number of iterations.
Definition agent.h:65
std::function< bool(const String &, const JsonObject &, String &)> human_in_the_loop
The human in the loop function.
Definition agent.h:84
Step in the agent's execution.
Definition autonomous_agent.h:33
JsonObject result
The result of the step.
Definition autonomous_agent.h:45
bool success
Whether the step was successful.
Definition autonomous_agent.h:49
String description
The description of the step.
Definition autonomous_agent.h:37
String status
The status of the step.
Definition autonomous_agent.h:41
Options for LLM API calls.
Definition llm_interface.h:25
double temperature
The temperature of the LLM.
Definition llm_interface.h:29
int max_tokens
The maximum number of tokens.
Definition llm_interface.h:33
Response from an LLM.
Definition types.h:85
String content
The content of the response.
Definition types.h:89