Routing Example

This example demonstrates the routing workflow pattern: an LLM-backed router inspects each user query, classifies it into one of several categories (factual, opinion, or technical), and dispatches it to the handler registered for that category, falling back to a default route when nothing matches.
#include <agents-cpp/config_loader.h>
#include <agents-cpp/logger.h>
#include <agents-cpp/tools/tool_registry.h>
#include <agents-cpp/workflows/routing_workflow.h>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <string>
// The framework types below are assumed to live in the `agents` namespace (see agent.h).
using namespace agents;

int main(int argc, char* argv[]) {
    // Initialize logging (defaults to Level::INFO).
    Logger::init();

    // Load configuration; the ConfigLoader singleton is assumed to pick up a .env file
    // and environment variables.
    auto& config = ConfigLoader::getInstance();

    // Resolve the Gemini API key: configuration first, then the command line.
    String api_key = config.get("GEMINI_API_KEY", "");
    if (api_key.empty() && argc > 1) {
        api_key = argv[1];
    }
    if (api_key.empty()) {
        Logger::error("No Gemini API key found. Please either:");
        Logger::error("1. Create a .env file with GEMINI_API_KEY=your_key, or");
        Logger::error("2. Set the GEMINI_API_KEY environment variable, or");
        Logger::error("3. Provide an API key as a command line argument");
        return EXIT_FAILURE;
    }

    // Create the Gemini LLM client.
    auto llm = createLLM("google", api_key, "gemini-1.5-flash");

    // Generation options; the values here are illustrative assumptions.
    LLMOptions options;
    options.temperature = 0.7;
    options.max_tokens = 1024;
    llm->setOptions(options);

    // Shared agent context used by the router and all route handlers.
    auto context = std::make_shared<AgentContext>();
    context->setLLM(llm);

    // Create the routing workflow; a constructor taking the shared context is assumed.
    workflows::RoutingWorkflow workflow(context);

    // Tell the router how to classify incoming queries.
    workflow.setRouterPrompt(
        "You are a routing assistant that examines user queries and classifies them into appropriate categories. "
        "Determine the most suitable category for handling the user's query based on the available routes."
    );

    // Route 1: factual questions, answered with help from a research tool.
    // createWikipediaTool() is an assumed factory; adapt it to however tools are
    // registered in tool_registry.h.
    auto wiki_tool = tools::createWikipediaTool();
    workflow.addRoute(
        "factual_query",
        "Questions about facts, events, statistics, or general knowledge",
        [&](const String& input, const JsonObject& params) -> JsonObject {
            // Look the answer up with the research tool.
            ToolResult result = wiki_tool->execute({{"query", input}});
            JsonObject response;
            response["answer"] = "Based on research: " + result.content;
            return response;
        }
    );

    // Route 2: opinion questions, answered with a balanced, multi-viewpoint prompt.
    workflow.addRoute(
        "opinion_query",
        "Questions seeking opinions, evaluations, or judgments on topics",
        [&](const String& input, const JsonObject& params) -> JsonObject {
            auto opinion_context = std::make_shared<AgentContext>(*context);
            opinion_context->setSystemPrompt(
                "You are a balanced and thoughtful assistant that provides nuanced perspectives on complex topics. "
                "Consider multiple viewpoints and provide balanced opinions."
            );
            LLMResponse llm_response = opinion_context->getLLM()->complete(input);
            JsonObject result;
            result["answer"] = "Opinion analysis: " + llm_response.content;
            return result;
        }
    );

    // Route 3: technical questions, answered with a precision-focused prompt.
    workflow.addRoute(
        "technical_query",
        "Questions about technical topics, programming, or specialized domains",
        [&](const String& input, const JsonObject& params) -> JsonObject {
            auto technical_context = std::make_shared<AgentContext>(*context);
            technical_context->setSystemPrompt(
                "You are a technical expert assistant that provides accurate and detailed information on technical topics. "
                "Focus on clarity, precision, and correctness."
            );
            LLMResponse llm_response = technical_context->getLLM()->complete(input);
            JsonObject result;
            result["answer"] = "Technical explanation: " + llm_response.content;
            return result;
        }
    );

    // Fallback for queries that fit none of the routes above.
    workflow.setDefaultRoute([&](const String& input, const JsonObject& params) -> JsonObject {
        LLMResponse llm_response = context->getLLM()->complete(input);
        JsonObject result;
        result["answer"] = "General response: " + llm_response.content;
        return result;
    });
    Logger::info("Routing workflow ready. Type a query, or 'exit' to quit.");

    String user_input;
    while (true) {
        std::cout << "\n> " << std::flush;
        if (!std::getline(std::cin, user_input)) {
            break;  // stop on EOF
        }
        if (user_input == "exit" || user_input == "quit" || user_input == "q") {
            break;
        }
        if (user_input.empty()) {
            continue;
        }
        try {
            // Classify the query and dispatch it to the selected route.
            JsonObject result = workflow.run(user_input);
            Logger::info("\nResponse: {}", result["answer"].get<String>());
        } catch (const std::exception& e) {
            Logger::error("Error while handling query: {}", e.what());
        }
    }

    return EXIT_SUCCESS;
}
API reference for the framework pieces used in this example:

- ConfigLoader::getInstance(): get the singleton instance of ConfigLoader (config_loader.h).
- Logger::init(Level level = Level::INFO): initialize the logger; Level::INFO is the info logging level (logger.h).
- Logger::debug(fmt, args...), Logger::info(fmt, args...), Logger::error(fmt, args...): log a message at the debug, info, or error level (logger.h).
- RoutingWorkflow (workflows/routing_workflow.h): routing workflow using the actor model.
  - setRouterPrompt(const String& prompt_template): set the router prompt.
  - addRoute(const String& name, const String& description, std::function<JsonObject(const String&, const JsonObject&)> handler): add a route with a direct function handler.
  - setDefaultRoute(std::function<JsonObject(const String&, const JsonObject&)> handler): set the default route.
  - JsonObject run(const String& input): execute the workflow with the given input.
- createLLM(const String& provider, const String& api_key, const String& model = ""): factory function that creates a specific LLM provider and returns a std::shared_ptr<LLMInterface>.
- LLM call options (llm_interface.h): double temperature, the temperature of the LLM; int max_tokens, the maximum number of tokens.
- LLMResponse (types.h): response from an LLM; its String content member holds the response text.
- Framework type aliases (types.h): String is std::string; JsonObject is nlohmann::json.
- All of the above live in the framework namespace (agent.h).