Documentation ¶
Index ¶
- func ChatWithHistory(ctx context.Context, model string, history []messages.ChatMessage, ...) (*messages.ChatMessage, error)
- func ConvertToAnthropicTool(schema *Schema) anthropic.ToolUnionParam
- func ConvertToGeminiSchema(schema *Schema) *genai.Schema
- func ConvertToOllamaFormat(schema *Schema) string
- func ConvertToOpenAISchema(schema *Schema) *ai.ChatCompletionResponseFormat
- func ConvertToolToAnthropic(schema *mcpjsonschema.Schema) anthropic.ToolUnionParam
- func ConvertToolToGemini(schema *mcpjsonschema.Schema) *genai.Tool
- func ConvertToolToOllama(schema *mcpjsonschema.Schema) ollamaapi.Tool
- func ConvertToolToOpenAI(schema *mcpjsonschema.Schema) ai.Tool
- func MessageToOpenAI(msg messages.ChatMessage) ai.ChatCompletionMessage
- func MessagesToAnthropicParams(msgs []messages.ChatMessage) ([]anthropic.MessageParam, string)
- func MessagesToGeminiContent(msgs []messages.ChatMessage) ([]*genai.Content, string, map[string]string)
- func MessagesToOllama(msgs []messages.ChatMessage) []ollamaapi.Message
- func MessagesToOpenAI(msgs []messages.ChatMessage) []ai.ChatCompletionMessage
- func QuickComplete(ctx context.Context, model, prompt string, maxTokens int) (string, error)
- func StreamComplete(ctx context.Context, model, prompt string, maxTokens int, onChunk func(string)) error
- func StructuredComplete(ctx context.Context, model, prompt string, schema *Schema, maxTokens int, ...) error
- type Agent
- type AgentCallbacks
- type AgentConfig
- type AgentResponse
- type AnthropicClient
- type CompletionBuilder
- func (b *CompletionBuilder) Build() *CompletionRequest
- func (b *CompletionBuilder) Execute(ctx context.Context, client LLM) (string, error)
- func (b *CompletionBuilder) ExecuteStreaming(ctx context.Context, client LLM, onChunk func(string)) error
- func (b *CompletionBuilder) ExecuteWithTools(ctx context.Context, client LLM, toolRegistry *tools.ToolRegistry) (*messages.ChatMessage, error)
- func (b *CompletionBuilder) WithAssistantMessage(content string) *CompletionBuilder
- func (b *CompletionBuilder) WithHistory(history []messages.ChatMessage) *CompletionBuilder
- func (b *CompletionBuilder) WithMaxTokens(tokens int) *CompletionBuilder
- func (b *CompletionBuilder) WithSchema(schema *Schema) *CompletionBuilder
- func (b *CompletionBuilder) WithSystemPrompt(prompt string) *CompletionBuilder
- func (b *CompletionBuilder) WithTemperature(temp float32) *CompletionBuilder
- func (b *CompletionBuilder) WithTimeout(timeout time.Duration) *CompletionBuilder
- func (b *CompletionBuilder) WithTools(tools []tools.Tool) *CompletionBuilder
- func (b *CompletionBuilder) WithUserMessage(content string) *CompletionBuilder
- type CompletionRequest
- type EventStreamProcessor
- type ExecutionHooks
- type GeminiClient
- type LLM
- type MultiPass
- type OllamaClient
- type OpenAIClient
- type Schema
- type SimpleProcessor
- type ThinkingEffort
- type ToolExecutor
- func (e *ToolExecutor) ExecuteAll(ctx context.Context, toolCalls []messages.ChatMessageToolCall, ...)
- func (e *ToolExecutor) ExecuteToolCall(ctx context.Context, toolCall messages.ChatMessageToolCall, ...) bool
- func (e *ToolExecutor) WithHooks(hooks *ExecutionHooks) *ToolExecutor
- func (e *ToolExecutor) WithTimeout(timeout time.Duration) *ToolExecutor
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func ChatWithHistory ¶
func ChatWithHistory(ctx context.Context, model string, history []messages.ChatMessage, newMessage string, maxTokens int) (*messages.ChatMessage, error)
ChatWithHistory performs a completion with conversation history
func ConvertToAnthropicTool ¶
func ConvertToAnthropicTool(schema *Schema) anthropic.ToolUnionParam
ConvertToAnthropicTool creates a synthetic tool for structured output with Anthropic
func ConvertToGeminiSchema ¶
func ConvertToGeminiSchema(schema *Schema) *genai.Schema
ConvertToGeminiSchema converts a generic JSON schema to Gemini's format
func ConvertToOllamaFormat ¶
func ConvertToOllamaFormat(schema *Schema) string
ConvertToOllamaFormat adds format instructions for Ollama
func ConvertToOpenAISchema ¶
func ConvertToOpenAISchema(schema *Schema) *ai.ChatCompletionResponseFormat
ConvertToOpenAISchema converts a generic JSON schema to OpenAI's format
func ConvertToolToAnthropic ¶
func ConvertToolToAnthropic(schema *mcpjsonschema.Schema) anthropic.ToolUnionParam
ConvertToolToAnthropic converts a generic tool schema to Anthropic format
func ConvertToolToGemini ¶
func ConvertToolToGemini(schema *mcpjsonschema.Schema) *genai.Tool
ConvertToolToGemini converts a generic tool schema to Gemini format
func ConvertToolToOllama ¶
func ConvertToolToOllama(schema *mcpjsonschema.Schema) ollamaapi.Tool
ConvertToolToOllama converts a generic tool schema to Ollama native format
func ConvertToolToOpenAI ¶
func ConvertToolToOpenAI(schema *mcpjsonschema.Schema) ai.Tool
ConvertToolToOpenAI converts a generic tool schema to OpenAI format
func MessageToOpenAI ¶
func MessageToOpenAI(msg messages.ChatMessage) ai.ChatCompletionMessage
MessageToOpenAI converts our agnostic message to OpenAI format
func MessagesToAnthropicParams ¶
func MessagesToAnthropicParams(msgs []messages.ChatMessage) ([]anthropic.MessageParam, string)
MessagesToAnthropicParams converts messages to Anthropic message parameters
func MessagesToGeminiContent ¶
func MessagesToGeminiContent(msgs []messages.ChatMessage) ([]*genai.Content, string, map[string]string)
MessagesToGeminiContent converts messages to Gemini content format
func MessagesToOllama ¶
func MessagesToOllama(msgs []messages.ChatMessage) []ollamaapi.Message
MessagesToOllama converts messages to Ollama format
func MessagesToOpenAI ¶
func MessagesToOpenAI(msgs []messages.ChatMessage) []ai.ChatCompletionMessage
MessagesToOpenAI converts a slice of agnostic messages to OpenAI format
func QuickComplete ¶
func QuickComplete(ctx context.Context, model, prompt string, maxTokens int) (string, error)
QuickComplete performs a simple one-shot completion with minimal configuration
Types ¶
type Agent ¶
type Agent struct {
// contains filtered or unexported fields
}
Agent handles the agentic loop without owning session state. It executes completions with automatic tool call handling.
func NewAgent ¶
func NewAgent(client LLM, registry *tools.ToolRegistry, config AgentConfig) *Agent
NewAgent creates a stateless agent that handles the agentic loop. The agent does not own session state - callers provide messages and receive back all generated messages to add to their own session.
func (*Agent) Run ¶
func (a *Agent) Run(ctx context.Context, req *CompletionRequest, cb *AgentCallbacks) (*AgentResponse, error)
Run executes a completion with automatic tool call handling. It loops until the LLM returns a response with no tool calls, or until MaxIterations is reached.
The caller provides messages in req.Messages and receives back all generated messages (assistant responses + tool results) in AgentResponse.AllMessages. The caller is responsible for adding these to their session.
type AgentCallbacks ¶
type AgentCallbacks struct {
// OnReasoning is called when reasoning/thinking content is streamed
OnReasoning func(content string)
// OnContent is called when regular content is streamed
OnContent func(content string)
// BeforeToolExecute is called before each tool executes.
// Returns a (possibly modified) context to pass to the tool.
// Use this to inject context values that tools need (e.g., IRC context).
// If nil, context passes through unchanged.
BeforeToolExecute func(ctx context.Context, call messages.ChatMessageToolCall, args map[string]any) context.Context
// OnToolStart is called once before parallel tool execution begins with all tool calls
OnToolStart func(calls []messages.ChatMessageToolCall)
// OnToolEnd is called after each tool executes
OnToolEnd func(call messages.ChatMessageToolCall, result string, duration time.Duration, err error)
// OnComplete is called when the final response is ready (no more tool calls)
OnComplete func(response *messages.ChatMessage)
// OnError is called when an error occurs
OnError func(err error)
}
AgentCallbacks provides hooks for observing and customizing agent execution
type AgentConfig ¶
type AgentConfig struct {
MaxIterations int // Maximum LLM calls before giving up (default: 10)
ToolTimeout time.Duration // Per-tool execution timeout (0 = no timeout)
MaxParallelTools int // Maximum parallel tool executions (0 = unlimited)
}
AgentConfig configures agent behavior
type AgentResponse ¶
type AgentResponse struct {
Message *messages.ChatMessage // Final assistant message (no tool calls)
AllMessages []messages.ChatMessage // All messages generated (assistant + tool results)
IterationCount int // Number of LLM calls made
}
AgentResponse contains the results after Run completes
type AnthropicClient ¶
type AnthropicClient struct {
// contains filtered or unexported fields
}
func NewAnthropicClient ¶
func NewAnthropicClient(apiKey string) *AnthropicClient
func (*AnthropicClient) ChatCompletionStream ¶
func (a *AnthropicClient) ChatCompletionStream(ctx context.Context, req *CompletionRequest, processor EventStreamProcessor) <-chan *messages.StreamEvent
ChatCompletionStream implements the event-based streaming interface
type CompletionBuilder ¶
type CompletionBuilder struct {
// contains filtered or unexported fields
}
CompletionBuilder provides a fluent interface for building completion requests
func NewCompletionBuilder ¶
func NewCompletionBuilder(model string) *CompletionBuilder
NewCompletionBuilder creates a new builder with defaults
func (*CompletionBuilder) Build ¶
func (b *CompletionBuilder) Build() *CompletionRequest
Build returns the built CompletionRequest
func (*CompletionBuilder) Execute ¶
func (b *CompletionBuilder) Execute(ctx context.Context, client LLM) (string, error)
func (*CompletionBuilder) ExecuteStreaming ¶
func (b *CompletionBuilder) ExecuteStreaming(ctx context.Context, client LLM, onChunk func(string)) error
ExecuteStreaming runs the completion with streaming callback
func (*CompletionBuilder) ExecuteWithTools ¶
func (b *CompletionBuilder) ExecuteWithTools(ctx context.Context, client LLM, toolRegistry *tools.ToolRegistry) (*messages.ChatMessage, error)
ExecuteWithTools runs the completion and handles tool calls automatically
func (*CompletionBuilder) WithAssistantMessage ¶
func (b *CompletionBuilder) WithAssistantMessage(content string) *CompletionBuilder
WithAssistantMessage adds an assistant message (for conversation history)
func (*CompletionBuilder) WithHistory ¶
func (b *CompletionBuilder) WithHistory(history []messages.ChatMessage) *CompletionBuilder
WithHistory adds conversation history
func (*CompletionBuilder) WithMaxTokens ¶
func (b *CompletionBuilder) WithMaxTokens(tokens int) *CompletionBuilder
WithMaxTokens sets the max tokens
func (*CompletionBuilder) WithSchema ¶
func (b *CompletionBuilder) WithSchema(schema *Schema) *CompletionBuilder
WithSchema adds a response schema for structured output
func (*CompletionBuilder) WithSystemPrompt ¶
func (b *CompletionBuilder) WithSystemPrompt(prompt string) *CompletionBuilder
WithSystemPrompt adds a system message
func (*CompletionBuilder) WithTemperature ¶
func (b *CompletionBuilder) WithTemperature(temp float32) *CompletionBuilder
WithTemperature sets the temperature
func (*CompletionBuilder) WithTimeout ¶
func (b *CompletionBuilder) WithTimeout(timeout time.Duration) *CompletionBuilder
WithTimeout sets the timeout
func (*CompletionBuilder) WithTools ¶
func (b *CompletionBuilder) WithTools(tools []tools.Tool) *CompletionBuilder
WithTools adds tools for function calling
func (*CompletionBuilder) WithUserMessage ¶
func (b *CompletionBuilder) WithUserMessage(content string) *CompletionBuilder
WithUserMessage adds a user message
type CompletionRequest ¶
type CompletionRequest struct {
APIKey string
BaseURL string
Timeout time.Duration
Temperature float32
Model string
MaxTokens int
Messages []messages.ChatMessage // Message history
Tools []tools.Tool // Available tools
ResponseSchema *Schema // Optional schema for structured output
ThinkingEffort ThinkingEffort // Reasoning effort level: ThinkingOff, ThinkingLow, ThinkingMedium, ThinkingHigh
Stream *bool // nil = streaming (default), false = non-streaming
}
CompletionRequest contains all parameters for a completion request
type EventStreamProcessor ¶
type EventStreamProcessor interface {
ProcessMessagesToEvents(<-chan messages.ChatMessage) <-chan *messages.StreamEvent
}
EventStreamProcessor processes message streams into events
type ExecutionHooks ¶
type ExecutionHooks struct {
// BeforeExecute is called before each tool executes.
// Returns a (possibly modified) context to pass to the tool.
// If nil, context passes through unchanged.
BeforeExecute func(ctx context.Context, toolCall messages.ChatMessageToolCall, args map[string]any) context.Context
// AfterExecute is called after each tool executes with timing info.
// Receives the result string and any error that occurred.
AfterExecute func(toolCall messages.ChatMessageToolCall, result string, duration time.Duration, err error)
// OnParseError is called when tool arguments fail to parse.
// Returns the error message to use. If nil, uses default message.
OnParseError func(toolCall messages.ChatMessageToolCall, err error) string
// OnToolNotFound is called when a tool isn't in the registry.
// Returns the error message to use. If nil, uses default message.
OnToolNotFound func(toolCall messages.ChatMessageToolCall) string
}
ExecutionHooks provides callbacks for customizing tool execution
type GeminiClient ¶
type GeminiClient struct {
// contains filtered or unexported fields
}
func NewGeminiClient ¶
func NewGeminiClient(apiKey string) *GeminiClient
func (*GeminiClient) ChatCompletionStream ¶
func (g *GeminiClient) ChatCompletionStream(ctx context.Context, req *CompletionRequest, processor EventStreamProcessor) <-chan *messages.StreamEvent
ChatCompletionStream implements the event-based streaming interface
type LLM ¶
type LLM interface {
// Event-based streaming method
ChatCompletionStream(context.Context, *CompletionRequest, EventStreamProcessor) <-chan *messages.StreamEvent
}
LLM interface defines the contract for language model implementations
func GetDefaultClient ¶
func GetDefaultClient() LLM
GetDefaultClient creates a MultiPass client with API keys from environment
type MultiPass ¶
type MultiPass struct {
// contains filtered or unexported fields
}
MultiPass routes requests to different LLM providers based on model prefix
func NewMultiPass ¶
NewMultiPass creates a new multi-provider router
func (*MultiPass) ChatCompletionStream ¶
func (m *MultiPass) ChatCompletionStream(ctx context.Context, req *CompletionRequest, processor EventStreamProcessor) <-chan *messages.StreamEvent
ChatCompletionStream routes the request to the appropriate provider using event-based streaming
type OllamaClient ¶
type OllamaClient struct {
// contains filtered or unexported fields
}
func NewOllamaClient ¶
func NewOllamaClient(baseURL string, apiKey string) *OllamaClient
func (*OllamaClient) ChatCompletionStream ¶
func (o *OllamaClient) ChatCompletionStream(ctx context.Context, req *CompletionRequest, processor EventStreamProcessor) <-chan *messages.StreamEvent
ChatCompletionStream implements the event-based streaming interface
type OpenAIClient ¶
type OpenAIClient struct {
ClientConfig ai.ClientConfig
Client *ai.Client
}
func NewOpenAIClient ¶
func NewOpenAIClient(apiKey string, baseURL string) *OpenAIClient
func (OpenAIClient) ChatCompletionStream ¶
func (o OpenAIClient) ChatCompletionStream(ctx context.Context, req *CompletionRequest, processor EventStreamProcessor) <-chan *messages.StreamEvent
ChatCompletionStream implements the event-based streaming interface
type Schema ¶
type Schema struct {
Raw map[string]any // Raw JSON schema
Strict bool // Whether to enforce strict validation
}
Schema represents a JSON schema for structured output
type SimpleProcessor ¶
type SimpleProcessor struct{}
SimpleProcessor is a basic implementation of EventStreamProcessor
func (*SimpleProcessor) ProcessMessagesToEvents ¶
func (s *SimpleProcessor) ProcessMessagesToEvents(msgChan <-chan messages.ChatMessage) <-chan *messages.StreamEvent
type ThinkingEffort ¶
type ThinkingEffort string
ThinkingEffort represents the level of reasoning effort for models that support extended thinking
const (
	ThinkingOff    ThinkingEffort = "off"
	ThinkingLow    ThinkingEffort = "low"
	ThinkingMedium ThinkingEffort = "medium"
	ThinkingHigh   ThinkingEffort = "high"
)
func ParseThinkingEffort ¶
func ParseThinkingEffort(s string) (ThinkingEffort, error)
ParseThinkingEffort converts a string to ThinkingEffort, returning error if invalid
func (ThinkingEffort) IsEnabled ¶
func (e ThinkingEffort) IsEnabled() bool
IsEnabled returns true if thinking is enabled (not off or empty)
type ToolExecutor ¶
type ToolExecutor struct {
Registry *tools.ToolRegistry
Hooks *ExecutionHooks
Timeout time.Duration // Default timeout for tool execution
}
ToolExecutor handles tool execution with customizable hooks
func NewToolExecutor ¶
func NewToolExecutor(registry *tools.ToolRegistry) *ToolExecutor
NewToolExecutor creates a new executor with the given registry
func (*ToolExecutor) ExecuteAll ¶
func (e *ToolExecutor) ExecuteAll(ctx context.Context, toolCalls []messages.ChatMessageToolCall, session sessions.Session)
ExecuteAll executes all tool calls and adds results to session
func (*ToolExecutor) ExecuteToolCall ¶
func (e *ToolExecutor) ExecuteToolCall(ctx context.Context, toolCall messages.ChatMessageToolCall, session sessions.Session) bool
ExecuteToolCall executes a single tool call and adds result to session. Returns true if successful, false on error.
func (*ToolExecutor) WithHooks ¶
func (e *ToolExecutor) WithHooks(hooks *ExecutionHooks) *ToolExecutor
WithHooks sets execution hooks and returns the executor for chaining
func (*ToolExecutor) WithTimeout ¶
func (e *ToolExecutor) WithTimeout(timeout time.Duration) *ToolExecutor
WithTimeout sets default timeout and returns the executor for chaining