package main

import (
	"bufio"
	"context"
	"fmt"
	"os"
	"os/signal"
	"strings"
	"syscall"

	"github.com/sashabaranov/go-openai"
	"github.com/tell-me/config"
	"github.com/tell-me/llm"
)
func main() {
|
||
// Load configuration
|
||
cfg, err := config.Load()
|
||
if err != nil {
|
||
fmt.Fprintf(os.Stderr, "Error loading configuration: %v\n", err)
|
||
fmt.Fprintf(os.Stderr, "Please create ~/.config/tell-me.ini from tell-me.ini.example\n")
|
||
os.Exit(1)
|
||
}
|
||
|
||
// Create LLM client
|
||
client := llm.NewClient(
|
||
cfg.LLM.APIURL,
|
||
cfg.LLM.APIKey,
|
||
cfg.LLM.Model,
|
||
cfg.LLM.ContextSize,
|
||
cfg.SearXNG.URL,
|
||
)
|
||
|
||
// Initialize conversation with system prompt
|
||
messages := []openai.ChatCompletionMessage{
|
||
{
|
||
Role: openai.ChatMessageRoleSystem,
|
||
Content: llm.GetSystemPrompt(),
|
||
},
|
||
}
|
||
|
||
ctx := context.Background()
|
||
|
||
// Check if arguments are provided (non-interactive mode)
|
||
if len(os.Args) > 1 {
|
||
query := strings.Join(os.Args[1:], " ")
|
||
processQuery(ctx, client, messages, query)
|
||
return
|
||
}
|
||
|
||
// Setup signal handling for Ctrl-C
|
||
sigChan := make(chan os.Signal, 1)
|
||
signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
|
||
go func() {
|
||
<-sigChan
|
||
fmt.Println("\n\nGoodbye!")
|
||
os.Exit(0)
|
||
}()
|
||
|
||
// Print welcome message
|
||
fmt.Println("╔════════════════════════════════════════════════════════════════╗")
|
||
fmt.Println("║ Tell-Me CLI ║")
|
||
fmt.Println("║ AI-powered search with local LLM support ║")
|
||
fmt.Println("╚════════════════════════════════════════════════════════════════╝")
|
||
fmt.Println()
|
||
fmt.Printf("Using model: %s\n", cfg.LLM.Model)
|
||
fmt.Printf("SearXNG: %s\n", cfg.SearXNG.URL)
|
||
fmt.Println()
|
||
fmt.Println("Type your questions below. Type 'exit' or 'quit' to exit, or press Ctrl-C.")
|
||
fmt.Println("────────────────────────────────────────────────────────────────")
|
||
fmt.Println()
|
||
|
||
// Create scanner for user input
|
||
scanner := bufio.NewScanner(os.Stdin)
|
||
|
||
for {
|
||
// Prompt for user input
|
||
fmt.Print("❯ ")
|
||
if !scanner.Scan() {
|
||
break
|
||
}
|
||
|
||
userInput := strings.TrimSpace(scanner.Text())
|
||
|
||
// Check for exit commands
|
||
if userInput == "exit" || userInput == "quit" {
|
||
fmt.Println("\nGoodbye!")
|
||
break
|
||
}
|
||
|
||
// Skip empty input
|
||
if userInput == "" {
|
||
continue
|
||
}
|
||
|
||
// Process the query
|
||
messages = processQuery(ctx, client, messages, userInput)
|
||
}
|
||
|
||
if err := scanner.Err(); err != nil {
|
||
fmt.Fprintf(os.Stderr, "Error reading input: %v\n", err)
|
||
os.Exit(1)
|
||
}
|
||
}
|
||
|
||
// processQuery handles a single query and returns updated messages
|
||
func processQuery(ctx context.Context, client *llm.Client, messages []openai.ChatCompletionMessage, userInput string) []openai.ChatCompletionMessage {
|
||
// Add user message to conversation
|
||
messages = append(messages, openai.ChatCompletionMessage{
|
||
Role: openai.ChatMessageRoleUser,
|
||
Content: userInput,
|
||
})
|
||
|
||
// Get response from LLM
|
||
fmt.Println()
|
||
response, updatedMessages, err := client.Chat(ctx, messages)
|
||
if err != nil {
|
||
fmt.Fprintf(os.Stderr, "\nError: %v\n\n", err)
|
||
// Remove the failed user message
|
||
return messages[:len(messages)-1]
|
||
}
|
||
|
||
// Update messages with the full conversation history
|
||
messages = updatedMessages
|
||
|
||
// Print response with empty line before it
|
||
fmt.Println()
|
||
fmt.Println(response)
|
||
fmt.Println()
|
||
|
||
return messages
|
||
}
|