|
| | GoogleLLM (const std::string &api_key="", const std::string &model="gemini-2.0-flash") |
| | Constructor.
|
|
| ~GoogleLLM () override=default |
| | Destructor.
|
| std::vector< std::string > | getAvailableModels () override |
| | Get available models from Google.
|
| void | setModel (const std::string &model) override |
| | Set the model to use.
|
| std::string | getModel () const override |
| | Get current model.
|
| void | setApiKey (const std::string &api_key) override |
| | Set API key.
|
| void | setApiBase (const std::string &api_base) override |
| | Set API base URL (for self-hosted or proxied endpoints).
|
| void | setOptions (const LLMOptions &options) override |
| | Set options for API calls.
|
| LLMOptions | getOptions () const override |
| | Get current options.
|
| LLMResponse | chat (const std::string &prompt) override |
| | Generate completion from a prompt.
|
| LLMResponse | chat (const std::vector< Message > &messages) override |
| | Generate completion from a list of messages.
|
| LLMResponse | chatWithTools (const std::vector< Message > &messages, const std::vector< std::shared_ptr< Tool > > &tools) override |
| | Generate completion with available tools.
|
| void | streamChat (const std::vector< Message > &messages, std::function< void(const std::string &, bool)> callback) override |
 | | Stream chat results incrementally through a callback receiving each text chunk and a boolean flag (presumably a completion indicator — confirm in implementation).
|
| AsyncGenerator< std::string > | streamChatAsync (const std::vector< Message > &messages, const std::vector< std::shared_ptr< Tool > > &tools) override |
 | | Asynchronously stream a chat completion, yielding text chunks as an AsyncGenerator; optional tools may be supplied.
|
| AsyncGenerator< std::pair< std::string, ToolCalls > > | streamChatAsyncWithTools (const std::vector< Message > &messages, const std::vector< std::shared_ptr< Tool > > &tools) override |
 | | Asynchronously stream a chat completion with tool support, yielding each text chunk paired with any associated tool calls.
|
| std::optional< JsonObject > | uploadMediaFile (const std::string &local_path, const std::string &mime, const std::string &binary="") override |
| | Upload a local media file to the provider's file storage and return a canonical media envelope (e.g., with fileUri).
|
| virtual Task< LLMResponse > | chatAsync (const std::vector< Message > &messages) |
| | Async chat from a list of messages.
|
| virtual Task< LLMResponse > | chatWithToolsAsync (const std::vector< Message > &messages, const std::vector< std::shared_ptr< Tool > > &tools) |
| | Async chat with tools.
|
Implementation of LLMInterface for Google Gemini models.