package ai

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"

	"git.gostacks.org/iwasforcedtobehere/WiseTLP/autotlp/internal/config"
	"git.gostacks.org/iwasforcedtobehere/WiseTLP/autotlp/pkg/types"
	"git.gostacks.org/iwasforcedtobehere/WiseTLP/autotlp/pkg/utils"
)

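// Client talks to a chat-completion style AI service using the provider,
// endpoint, and credentials captured in types.AIConfig.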
type Client struct {
	config     *types.AIConfig
	httpClient *http.Client
	logger     *utils.Logger
}

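// NewClient wraps the given configuration in a Client with a 30-second
// HTTP timeout and an "ai"-scoped logger.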
func NewClient(cfg *types.AIConfig, logger *utils.Logger) *Client {
	return &Client{
		config: cfg,
		httpClient: &http.Client{
			Timeout: 30 * time.Second,
		},
		logger: logger.WithComponent("ai"),
	}
}

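// ConfigureClient interactively prompts for a provider, model, and API key,
// validates the connection with a test request, and returns a ready Client.
//
// Illustrative call site (logger, ctx, sysInfo, and prefs are placeholders):
//
//	client, err := ai.ConfigureClient(logger)
//	if err != nil {
//		return err
//	}
//	tlpConfig, err := client.GenerateConfig(ctx, sysInfo, prefs)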
func ConfigureClient(logger *utils.Logger) (*Client, error) {
	fmt.Println("\n" + strings.Repeat("=", 50))
	fmt.Println("AI SERVICE CONFIGURATION")
	fmt.Println(strings.Repeat("=", 50))

	// Named cfg to avoid shadowing the imported internal/config package.
	cfg := &types.AIConfig{}

	fmt.Println("\nAvailable AI providers:")
	fmt.Println("1. Groq (Fast inference)")
	fmt.Println("2. OpenRouter (Multiple models)")
	fmt.Println("3. Gemini (Google)")
	fmt.Println("4. Custom (OpenAI-compatible)")

	choice := utils.GetUserInput("Select provider (1-4)", "1")

	switch choice {
	case "1":
		cfg.Provider = types.AIProviderGroq
		cfg.Endpoint = "https://api.groq.com/openai/v1/chat/completions"
		cfg.Model = "openai/gpt-oss-20b"
	case "2":
		cfg.Provider = types.AIProviderOpenRouter
		cfg.Endpoint = "https://openrouter.ai/api/v1/chat/completions"
		cfg.Model = utils.GetUserInput("Model name", "meta-llama/llama-3.1-8b-instruct:free")
	case "3":
		cfg.Provider = types.AIProviderGemini
		cfg.Endpoint = "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent"
		cfg.Model = "gemini-pro"
	case "4":
		cfg.Provider = types.AIProviderCustom
		cfg.Endpoint = utils.GetUserInput("API endpoint", "")
		cfg.Model = utils.GetUserInput("Model name", "")
	default:
		return nil, fmt.Errorf("invalid provider selection: %q", choice)
	}

	fmt.Printf("\nEnter your API key for %s: ", cfg.Provider)
	var apiKey string
	// Scanln suffices here: API keys are single whitespace-free tokens.
	fmt.Scanln(&apiKey)
	cfg.APIKey = strings.TrimSpace(apiKey)

	if cfg.APIKey == "" {
		return nil, fmt.Errorf("API key is required")
	}

	cfg.MaxTokens = 1500
	cfg.Temperature = 0.3

	client := NewClient(cfg, logger)
	if err := client.validateConnection(context.Background()); err != nil {
		return nil, fmt.Errorf("failed to validate AI connection: %w", err)
	}

	fmt.Println("✓ AI service configured successfully!")
	return client, nil
}

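// GenerateConfig builds a prompt from the detected system and user
// preferences, sends it to the configured provider, and parses the reply
// into a TLPConfiguration.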
func (c *Client) GenerateConfig(ctx context.Context, sysInfo *types.SystemInfo, preferences *types.UserPreferences) (*types.TLPConfiguration, error) {
	c.logger.Info("Generating TLP configuration using AI", "provider", c.config.Provider, "model", c.config.Model)

	prompt := c.buildPrompt(sysInfo, preferences)
	response, err := c.makeRequest(ctx, prompt)
	if err != nil {
		return nil, fmt.Errorf("AI request failed: %w", err)
	}

	tlpConfig, err := c.parseResponse(response, sysInfo, preferences)
	if err != nil {
		return nil, fmt.Errorf("failed to parse AI response: %w", err)
	}

	c.logger.Info("TLP configuration generated successfully", "settings_count", len(tlpConfig.Settings))
	return tlpConfig, nil
}

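// buildPrompt assembles a compact prompt: machine class (laptop if a battery
// is present, desktop otherwise), distribution, core count, user preferences,
// and the exact JSON shape the model must return.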
func (c *Client) buildPrompt(sysInfo *types.SystemInfo, preferences *types.UserPreferences) string {
	var prompt strings.Builder

	prompt.WriteString("Generate TLP configuration in JSON format.\n\n")

	prompt.WriteString("System: ")
	if sysInfo.Battery != nil && sysInfo.Battery.Present {
		prompt.WriteString("Laptop")
	} else {
		prompt.WriteString("Desktop")
	}
	prompt.WriteString(fmt.Sprintf(", %s, %d cores\n", sysInfo.Distribution.ID, sysInfo.CPU.Cores))

	prompt.WriteString(fmt.Sprintf("Profile: %s, Use: %s, Mode: %s\n\n", preferences.PowerProfile, preferences.UseCase, preferences.PerformanceMode))

	prompt.WriteString("Return JSON with this structure:\n")
	prompt.WriteString(`{"description": "Config description", "settings": {"TLP_ENABLE": "1", "CPU_SCALING_GOVERNOR_ON_AC": "performance", "CPU_SCALING_GOVERNOR_ON_BAT": "powersave", "DISK_APM_LEVEL_ON_AC": "254", "DISK_APM_LEVEL_ON_BAT": "128", "WIFI_PWR_ON_AC": "off", "WIFI_PWR_ON_BAT": "on", "USB_AUTOSUSPEND": "1"}, "rationale": {"CPU_SCALING_GOVERNOR_ON_AC": "Max performance on AC"}, "warnings": ["Monitor temperatures"]}` + "\n\n")

	prompt.WriteString("Generate 8-10 TLP settings for this system.")

	return prompt.String()
}

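// makeRequest marshals the prompt into the provider-specific request body
// (Gemini's generateContent format, or an OpenAI-style chat completion for
// all other providers), POSTs it, and returns the extracted text content.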
func (c *Client) makeRequest(ctx context.Context, prompt string) (string, error) {
	c.logger.Debug("Making AI request", "prompt_length", len(prompt))
	c.logger.Debug("Full prompt being sent", "prompt", prompt)

	var requestBody interface{}
	var endpoint string

	switch c.config.Provider {
	case types.AIProviderGemini:
		requestBody = map[string]interface{}{
			"contents": []map[string]interface{}{
				{
					"parts": []map[string]string{
						{"text": prompt},
					},
				},
			},
			"generationConfig": map[string]interface{}{
				"temperature":     c.config.Temperature,
				"maxOutputTokens": c.config.MaxTokens,
			},
		}
		// Gemini authenticates via a query parameter rather than a header.
		endpoint = c.config.Endpoint + "?key=" + c.config.APIKey
	default:
		requestBody = map[string]interface{}{
			"model": c.config.Model,
			"messages": []map[string]interface{}{
				{
					"role":    "user",
					"content": prompt,
				},
			},
			"max_tokens":  c.config.MaxTokens,
			"temperature": c.config.Temperature,
			"response_format": map[string]interface{}{
				"type": "json_object",
			},
		}
		endpoint = c.config.Endpoint
	}

	jsonBody, err := json.Marshal(requestBody)
	if err != nil {
		return "", fmt.Errorf("failed to marshal request: %w", err)
	}

	c.logger.Debug("Request body", "json", string(jsonBody))

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(jsonBody))
	if err != nil {
		return "", fmt.Errorf("failed to create request: %w", err)
	}

	req.Header.Set("Content-Type", "application/json")

	if c.config.Provider != types.AIProviderGemini {
		req.Header.Set("Authorization", "Bearer "+c.config.APIKey)
	}

	resp, err := c.httpClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("failed to read response: %w", err)
	}

	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("API request failed with status %d: %s", resp.StatusCode, string(body))
	}

	return c.extractContent(body)
}

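// extractContent digs the generated text out of the provider's response
// envelope: candidates[0].content.parts[0].text for Gemini, or
// choices[0].message.content for OpenAI-compatible APIs.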
func (c *Client) extractContent(responseBody []byte) (string, error) {
	var response map[string]interface{}
	if err := json.Unmarshal(responseBody, &response); err != nil {
		return "", fmt.Errorf("failed to parse response JSON: %w", err)
	}

	switch c.config.Provider {
	case types.AIProviderGemini:
		if candidates, ok := response["candidates"].([]interface{}); ok && len(candidates) > 0 {
			if candidate, ok := candidates[0].(map[string]interface{}); ok {
				if content, ok := candidate["content"].(map[string]interface{}); ok {
					if parts, ok := content["parts"].([]interface{}); ok && len(parts) > 0 {
						if part, ok := parts[0].(map[string]interface{}); ok {
							if text, ok := part["text"].(string); ok {
								return text, nil
							}
						}
					}
				}
			}
		}
	default:
		if choices, ok := response["choices"].([]interface{}); ok && len(choices) > 0 {
			if choice, ok := choices[0].(map[string]interface{}); ok {
				if message, ok := choice["message"].(map[string]interface{}); ok {
					if content, ok := message["content"].(string); ok {
						return content, nil
					}
				}
			}
		}
	}

	return "", fmt.Errorf("unexpected response format")
}

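// parseResponse extracts the first {...} block from the model output and
// decodes it. Missing or malformed JSON degrades to the fallback
// configuration; a well-formed reply with no settings is an error.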
func (c *Client) parseResponse(response string, sysInfo *types.SystemInfo, preferences *types.UserPreferences) (*types.TLPConfiguration, error) {
	c.logger.Debug("Raw AI response", "response", response)

	// Extract the outermost JSON object; models often wrap it in prose or fences.
	start := strings.Index(response, "{")
	end := strings.LastIndex(response, "}") + 1

	// end <= start also catches a stray "}" before the first "{", which would
	// otherwise panic when slicing below.
	if start == -1 || end <= start {
		c.logger.Error("No JSON found in AI response", "response_length", len(response), "response_preview", response[:min(200, len(response))])
		c.logger.Info("Generating fallback TLP configuration")
		return c.generateFallbackConfig(sysInfo, preferences), nil
	}

	jsonStr := response[start:end]

	var aiResponse struct {
		Description string            `json:"description"`
		Settings    map[string]string `json:"settings"`
		Rationale   map[string]string `json:"rationale"`
		Warnings    []string          `json:"warnings"`
	}

	if err := json.Unmarshal([]byte(jsonStr), &aiResponse); err != nil {
		c.logger.Error("Failed to parse AI JSON response", "error", err, "json_str", jsonStr)
		c.logger.Info("Generating fallback TLP configuration due to JSON parse error")
		return c.generateFallbackConfig(sysInfo, preferences), nil
	}

	if len(aiResponse.Settings) == 0 {
		return nil, fmt.Errorf("AI response contains no settings")
	}

	tlpConfig := &types.TLPConfiguration{
		Settings:    aiResponse.Settings,
		Description: aiResponse.Description,
		Rationale:   aiResponse.Rationale,
		Warnings:    aiResponse.Warnings,
		Generated:   time.Now(),
		SystemInfo:  sysInfo,
		Preferences: preferences,
	}

	// TLP_ENABLE must be present for the generated file to take effect.
	if _, exists := tlpConfig.Settings["TLP_ENABLE"]; !exists {
		tlpConfig.Settings["TLP_ENABLE"] = "1"
	}

	if tlpConfig.Rationale == nil {
		tlpConfig.Rationale = make(map[string]string)
	}

	return tlpConfig, nil
}

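// validateConnection sends a trivial test prompt and checks that the reply
// acknowledges with "OK" before the client is handed to callers.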
func (c *Client) validateConnection(ctx context.Context) error {
	testPrompt := "Respond with a JSON object containing 'status': 'OK' to confirm the connection is working."

	response, err := c.makeRequest(ctx, testPrompt)
	if err != nil {
		return fmt.Errorf("connection test failed: %w", err)
	}

	if !strings.Contains(strings.ToUpper(response), "OK") {
		return fmt.Errorf("unexpected response from AI service: %s", response)
	}

	return nil
}

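// min returns the smaller of two ints. On Go 1.21+ this shadows the
// predeclared min builtin, which could be used directly instead.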
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

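// generateFallbackConfig derives a configuration from the static
// recommendations in internal/config when the AI service is unavailable or
// returns output that cannot be parsed.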
func (c *Client) generateFallbackConfig(sysInfo *types.SystemInfo, preferences *types.UserPreferences) *types.TLPConfiguration {
	c.logger.Info("Generating fallback configuration based on user preferences")

	settings := config.GetRecommendedSettings(preferences)

	rationale := make(map[string]string)
	rationale["TLP_ENABLE"] = "Enable TLP power management"
	rationale["CPU_SCALING_GOVERNOR_ON_AC"] = "CPU governor for AC power based on user preferences"
	rationale["CPU_SCALING_GOVERNOR_ON_BAT"] = "CPU governor for battery power based on user preferences"

	warnings := []string{
		"This is a fallback configuration generated when AI service was unavailable",
		"Configuration is based on user preferences and common best practices",
		"Consider running WiseTLP again when AI service is available for optimized settings",
	}

	description := fmt.Sprintf("Fallback TLP configuration for %s use case with %s power profile",
		preferences.UseCase, preferences.PowerProfile)

	return &types.TLPConfiguration{
		Settings:    settings,
		Description: description,
		Rationale:   rationale,
		Warnings:    warnings,
		Generated:   time.Now(),
		SystemInfo:  sysInfo,
		Preferences: preferences,
	}
}