TTW_Bot_GO/internal/ai/ollama.go

72 lines
1.7 KiB
Go

package ai
import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"
)
// OllamaProvider is an AI provider backed by a local or remote Ollama
// server, reached over its HTTP API (POST /api/generate).
type OllamaProvider struct {
// endpoint is the base URL of the Ollama server (no trailing path).
endpoint string
// model is the model name sent with every generate request.
model string
// systemPrompt is an optional system prompt included in each request.
systemPrompt string
// client is the shared HTTP client; NewOllamaProvider sets a 30s timeout.
client *http.Client
}
// ollamaRequest is the JSON request payload for POST /api/generate.
type ollamaRequest struct {
Model string `json:"model"`
Prompt string `json:"prompt"`
// System is omitted from the JSON when no system prompt is configured.
System string `json:"system,omitempty"`
// Stream is always sent as false here so the server returns one
// complete JSON object instead of a stream of chunks.
Stream bool `json:"stream"`
}
// ollamaResponse holds the only field of the /api/generate reply this
// package uses: the generated text. All other reply fields are ignored.
type ollamaResponse struct {
Response string `json:"response"`
}
// NewOllamaProvider returns an OllamaProvider that talks to the Ollama
// HTTP API at endpoint, generating with the given model and optional
// system prompt. An empty endpoint defaults to the local Ollama daemon.
func NewOllamaProvider(endpoint, model, systemPrompt string) *OllamaProvider {
	if endpoint == "" {
		endpoint = "http://localhost:11434"
	}
	// Normalize a trailing slash so Ask can append "/api/generate"
	// without producing a double-slash URL.
	endpoint = strings.TrimSuffix(endpoint, "/")
	return &OllamaProvider{
		endpoint:     endpoint,
		model:        model,
		systemPrompt: systemPrompt,
		// Generation can be slow; bound every request at 30 seconds
		// even when the caller's context has no deadline.
		client: &http.Client{Timeout: 30 * time.Second},
	}
}
// Ask sends prompt to the Ollama /api/generate endpoint and returns the
// model's complete (non-streamed) response text. The supplied ctx governs
// cancellation in addition to the client's own 30s timeout.
func (p *OllamaProvider) Ask(ctx context.Context, prompt string) (string, error) {
	reqBody := ollamaRequest{
		Model:  p.model,
		Prompt: prompt,
		System: p.systemPrompt,
		// Request a single JSON object rather than a chunk stream.
		Stream: false,
	}
	jsonData, err := json.Marshal(reqBody)
	if err != nil {
		return "", fmt.Errorf("encoding ollama request: %w", err)
	}
	url := p.endpoint + "/api/generate"
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(jsonData))
	if err != nil {
		return "", fmt.Errorf("building ollama request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := p.client.Do(req)
	if err != nil {
		return "", fmt.Errorf("calling ollama: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// Include a bounded slice of the body so callers see the
		// server's error detail, not just a bare status code.
		body, _ := io.ReadAll(io.LimitReader(resp.Body, 4<<10))
		return "", fmt.Errorf("ollama error: %d: %s", resp.StatusCode, bytes.TrimSpace(body))
	}
	var ollamaResp ollamaResponse
	if err := json.NewDecoder(resp.Body).Decode(&ollamaResp); err != nil {
		return "", fmt.Errorf("decoding ollama response: %w", err)
	}
	return ollamaResp.Response, nil
}