Refactored, added comprehensive testing.
All checks were successful
Release / release (push) Successful in 3m17s

Jay
2025-10-26 23:23:43 -04:00
parent ec32b75267
commit 1936f055e2
61 changed files with 4678 additions and 769 deletions

141
config/config.go Normal file

@@ -0,0 +1,141 @@
package config
import (
"fmt"
"os"
)
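// UsageText is the full help text that printUsage writes to stderr.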
const UsageText = `Usage: aicli [OPTION]...
Send prompts and files to LLM chat endpoints.
Global:
--version display version and exit
Input:
-f, --file PATH input file (repeatable)
-F, --stdin-file treat stdin as file content
-p, --prompt TEXT prompt text (repeatable)
-pf, --prompt-file PATH read prompt from file
System:
-s, --system TEXT system prompt text
-sf, --system-file PATH read system prompt from file
(error if both -s and -sf provided)
API:
-l, --protocol PROTO openai or ollama (default: openai)
-u, --url URL endpoint (default: https://api.ppq.ai/chat/completions)
-k, --key KEY API key
-kf, --key-file PATH read API key from file
Models:
-m, --model NAME primary model (default: gpt-4o-mini)
-b, --fallback NAMES comma-separated fallback list (default: gpt-4.1-mini)
Output:
-o, --output PATH write to file (mode 0644) instead of stdout
-q, --quiet suppress progress messages
-v, --verbose log debug information to stderr
Config:
-c, --config PATH YAML config file
Environment Variables:
AICLI_API_KEY API key
AICLI_API_KEY_FILE path to API key file
AICLI_PROTOCOL API protocol
AICLI_URL endpoint URL
AICLI_MODEL primary model name
AICLI_FALLBACK comma-separated fallback models
AICLI_SYSTEM system prompt text
AICLI_SYSTEM_FILE path to system prompt file
AICLI_CONFIG_FILE path to config file
AICLI_PROMPT_FILE path to prompt file
AICLI_DEFAULT_PROMPT override default prompt
Precedence Rules:
API key: --key > --key-file > AICLI_API_KEY > AICLI_API_KEY_FILE > config key_file
System: --system > --system-file > AICLI_SYSTEM > AICLI_SYSTEM_FILE > config system_file
Config file: --config > AICLI_CONFIG_FILE
All others: flags > environment > config file > defaults
Stdin Behavior:
No flags: stdin becomes the prompt
With -p/-pf: stdin appends after explicit prompts
With -F: stdin becomes first file (path: "input")
Examples:
echo "What is Rust?" | aicli
cat log.txt | aicli -F -p "Find errors in this log"
aicli -f main.go -p "Review this code"
aicli -c ~/.aicli.yaml -f src/main.go -f src/util.go -o analysis.md
aicli -p "Context:" -pf template.txt -p "Apply to finance sector"
`
func printUsage() {
fmt.Fprint(os.Stderr, UsageText)
}
// BuildConfig resolves configuration from all sources with precedence:
// flags > env > file > defaults
func BuildConfig(args []string) (ConfigData, error) {
flags, err := parseFlags(args)
if err != nil {
return ConfigData{}, fmt.Errorf("parse flags: %w", err)
}
// Validate protocol strings before merge
if flags.protocol != "" && flags.protocol != "openai" && flags.protocol != "ollama" {
return ConfigData{}, fmt.Errorf("invalid protocol: must be openai or ollama, got: %s", flags.protocol)
}
configPath := flags.config
if configPath == "" {
configPath = os.Getenv("AICLI_CONFIG_FILE")
}
env := loadEnvironment()
// Validate env protocol
if env.protocol != "" && env.protocol != "openai" && env.protocol != "ollama" {
return ConfigData{}, fmt.Errorf("invalid protocol: must be openai or ollama, got: %s", env.protocol)
}
file, err := loadConfigFile(configPath)
if err != nil {
return ConfigData{}, fmt.Errorf("load config file: %w", err)
}
// Validate file protocol
if file.protocol != "" && file.protocol != "openai" && file.protocol != "ollama" {
return ConfigData{}, fmt.Errorf("invalid protocol: must be openai or ollama, got: %s", file.protocol)
}
cfg := mergeSources(flags, env, file)
if err := validateConfig(cfg); err != nil {
return ConfigData{}, err
}
return cfg, nil
}
// IsVersionRequest checks if --version flag was passed
func IsVersionRequest(args []string) bool {
for _, arg := range args {
if arg == "--version" {
return true
}
}
return false
}
// IsHelpRequest checks if -h or --help flag was passed
func IsHelpRequest(args []string) bool {
for _, arg := range args {
if arg == "-h" || arg == "--help" {
return true
}
}
return false
}
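A minimal sketch of how a caller outside this package might wire these helpers together; the main package below is not part of this commit, and the module path and version string are hypothetical:

package main

import (
	"fmt"
	"os"

	"example.invalid/aicli/config" // hypothetical import path for this repo's config package
)

func main() {
	args := os.Args[1:]
	if config.IsHelpRequest(args) {
		fmt.Fprint(os.Stderr, config.UsageText)
		return
	}
	if config.IsVersionRequest(args) {
		fmt.Println("aicli dev") // a real build would inject the version via -ldflags
		return
	}
	cfg, err := config.BuildConfig(args)
	if err != nil {
		fmt.Fprintln(os.Stderr, "aicli:", err)
		os.Exit(1)
	}
	_ = cfg // hand the resolved ConfigData to the request/transport layer (outside this diff)
}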

77
config/config_test.go Normal file

@@ -0,0 +1,77 @@
package config
import (
"testing"

"github.com/stretchr/testify/assert"
)
func TestBuildConfig(t *testing.T) {
tests := []struct {
name string
args []string
env map[string]string
wantErr bool
check func(*testing.T, ConfigData)
}{
{
name: "valid full config",
args: []string{"-k", "sk-test", "-m", "gpt-4"},
check: func(t *testing.T, cfg ConfigData) {
assert.Equal(t, "sk-test", cfg.APIKey)
assert.Equal(t, "gpt-4", cfg.Model)
},
},
{
name: "config file from env",
args: []string{"-k", "sk-test"},
env: map[string]string{"AICLI_CONFIG_FILE": "testdata/partial.yaml"},
check: func(t *testing.T, cfg ConfigData) {
assert.Equal(t, "gpt-4", cfg.Model)
},
},
{
name: "missing api key",
args: []string{},
wantErr: true,
},
{
name: "invalid config file",
args: []string{"-c", "testdata/invalid.yaml", "-k", "test"},
wantErr: true,
},
{
name: "invalid protocol in flags",
args: []string{"-k", "sk-test", "-l", "invalid"},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Clear all AICLI_* env vars
t.Setenv("AICLI_API_KEY", "")
t.Setenv("AICLI_API_KEY_FILE", "")
t.Setenv("AICLI_PROTOCOL", "")
t.Setenv("AICLI_URL", "")
t.Setenv("AICLI_MODEL", "")
t.Setenv("AICLI_FALLBACK", "")
t.Setenv("AICLI_SYSTEM", "")
t.Setenv("AICLI_CONFIG_FILE", "")
// Apply test-specific env
for k, v := range tt.env {
t.Setenv(k, v)
}
cfg, err := BuildConfig(tt.args)
if tt.wantErr {
assert.Error(t, err)
return
}
assert.NoError(t, err)
if tt.check != nil {
tt.check(t, cfg)
}
})
}
}

11
config/defaults.go Normal file

@@ -0,0 +1,11 @@
package config
var defaultConfig = ConfigData{
StdinAsFile: false,
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "gpt-4o-mini",
FallbackModels: []string{"gpt-4.1-mini"},
Quiet: false,
Verbose: false,
}

34
config/env.go Normal file

@@ -0,0 +1,34 @@
package config
import (
"os"
"strings"
)
func loadEnvironment() envValues {
ev := envValues{}
if val := os.Getenv("AICLI_PROTOCOL"); val != "" {
ev.protocol = val
}
if val := os.Getenv("AICLI_URL"); val != "" {
ev.url = val
}
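// AICLI_API_KEY takes precedence over AICLI_API_KEY_FILE; an unreadable key
// file is skipped so a lower-precedence source can still supply the key.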
if val := os.Getenv("AICLI_API_KEY"); val != "" {
ev.key = val
} else if val := os.Getenv("AICLI_API_KEY_FILE"); val != "" {
content, err := os.ReadFile(val)
if err == nil {
ev.key = strings.TrimSpace(string(content))
}
}
if val := os.Getenv("AICLI_MODEL"); val != "" {
ev.model = val
}
if val := os.Getenv("AICLI_FALLBACK"); val != "" {
ev.fallback = val
}
if val := os.Getenv("AICLI_SYSTEM"); val != "" {
ev.system = val
}
return ev
}

134
config/env_test.go Normal file

@@ -0,0 +1,134 @@
package config
import (
"testing"

"github.com/stretchr/testify/assert"
)
func TestLoadEnvironment(t *testing.T) {
tests := []struct {
name string
env map[string]string
want envValues
}{
{
name: "empty environment",
env: map[string]string{},
want: envValues{},
},
{
name: "protocol only",
env: map[string]string{"AICLI_PROTOCOL": "ollama"},
want: envValues{protocol: "ollama"},
},
{
name: "url only",
env: map[string]string{"AICLI_URL": "http://localhost:11434"},
want: envValues{url: "http://localhost:11434"},
},
{
name: "api key direct",
env: map[string]string{"AICLI_API_KEY": "sk-test123"},
want: envValues{key: "sk-test123"},
},
{
name: "model only",
env: map[string]string{"AICLI_MODEL": "llama3"},
want: envValues{model: "llama3"},
},
{
name: "fallback only",
env: map[string]string{"AICLI_FALLBACK": "gpt-3.5,gpt-4"},
want: envValues{fallback: "gpt-3.5,gpt-4"},
},
{
name: "system only",
env: map[string]string{"AICLI_SYSTEM": "You are helpful"},
want: envValues{system: "You are helpful"},
},
{
name: "all variables set",
env: map[string]string{
"AICLI_PROTOCOL": "openai",
"AICLI_URL": "https://api.openai.com/v1/chat/completions",
"AICLI_API_KEY": "sk-abc",
"AICLI_MODEL": "gpt-4",
"AICLI_FALLBACK": "gpt-3.5",
"AICLI_SYSTEM": "system prompt",
},
want: envValues{
protocol: "openai",
url: "https://api.openai.com/v1/chat/completions",
key: "sk-abc",
model: "gpt-4",
fallback: "gpt-3.5",
system: "system prompt",
},
},
{
name: "empty string values preserved",
env: map[string]string{"AICLI_SYSTEM": ""},
want: envValues{system: ""},
},
{
name: "whitespace preserved",
env: map[string]string{"AICLI_SYSTEM": " spaces "},
want: envValues{system: " spaces "},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Set environment
for k, v := range tt.env {
t.Setenv(k, v)
}
got := loadEnvironment()
assert.Equal(t, tt.want, got)
})
}
}
func TestLoadEnvironmentKeyFile(t *testing.T) {
tests := []struct {
name string
env map[string]string
want envValues
}{
{
name: "key file when no direct key",
env: map[string]string{"AICLI_API_KEY_FILE": "testdata/api.key"},
want: envValues{key: "sk-test-key-123"},
},
{
name: "direct key overrides key file",
env: map[string]string{
"AICLI_API_KEY": "sk-direct",
"AICLI_API_KEY_FILE": "testdata/api.key",
},
want: envValues{key: "sk-direct"},
},
{
name: "key file not found",
env: map[string]string{"AICLI_API_KEY_FILE": "/nonexistent/key.txt"},
want: envValues{},
},
{
name: "key file with whitespace trimmed",
env: map[string]string{"AICLI_API_KEY_FILE": "testdata/api_whitespace.key"},
want: envValues{key: "sk-whitespace-key"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
for k, v := range tt.env {
t.Setenv(k, v)
}
got := loadEnvironment()
assert.Equal(t, tt.want, got)
})
}
}

44
config/file.go Normal file

@@ -0,0 +1,44 @@
package config
import (
"os"

"gopkg.in/yaml.v3"
)
func loadConfigFile(path string) (fileValues, error) {
if path == "" {
return fileValues{}, nil
}
data, err := os.ReadFile(path)
if err != nil {
return fileValues{}, err
}
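// Decode into a generic map so unknown keys are ignored and non-string values are skipped.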
var raw map[string]interface{}
if err := yaml.Unmarshal(data, &raw); err != nil {
return fileValues{}, err
}
fv := fileValues{}
if v, ok := raw["protocol"].(string); ok {
fv.protocol = v
}
if v, ok := raw["url"].(string); ok {
fv.url = v
}
if v, ok := raw["key_file"].(string); ok {
fv.keyFile = v
}
if v, ok := raw["model"].(string); ok {
fv.model = v
}
if v, ok := raw["fallback"].(string); ok {
fv.fallback = v
}
if v, ok := raw["system_file"].(string); ok {
fv.systemFile = v
}
return fv, nil
}

76
config/file_test.go Normal file

@@ -0,0 +1,76 @@
package config
import (
"testing"

"github.com/stretchr/testify/assert"
)
func TestLoadConfigFile(t *testing.T) {
tests := []struct {
name string
path string
want fileValues
wantErr bool
}{
{
name: "empty path returns nil",
path: "",
want: fileValues{},
},
{
name: "valid config",
path: "testdata/valid.yaml",
want: fileValues{
protocol: "ollama",
url: "http://localhost:11434/api/chat",
keyFile: "~/.aicli_key",
model: "llama3",
fallback: "llama2,mistral",
systemFile: "~/system.txt",
},
},
{
name: "partial config",
path: "testdata/partial.yaml",
want: fileValues{
model: "gpt-4",
fallback: "gpt-3.5-turbo",
},
},
{
name: "empty file",
path: "testdata/empty.yaml",
want: fileValues{},
},
{
name: "file not found",
path: "testdata/nonexistent.yaml",
wantErr: true,
},
{
name: "invalid yaml syntax",
path: "testdata/invalid.yaml",
wantErr: true,
},
{
name: "unknown keys ignored",
path: "testdata/unknown_keys.yaml",
want: fileValues{
protocol: "openai",
model: "gpt-4",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := loadConfigFile(tt.path)
if tt.wantErr {
assert.Error(t, err)
return
}
assert.NoError(t, err)
assert.Equal(t, tt.want, got)
})
}
}

81
config/flags.go Normal file

@@ -0,0 +1,81 @@
package config
import (
"flag"
"strings"
)
// stringSlice implements flag.Value so repeatable flags (-f/--file, -p/--prompt)
// can collect multiple values.
type stringSlice []string
func (s *stringSlice) String() string {
if s == nil || len(*s) == 0 {
return ""
}
return strings.Join(*s, ",")
}
func (s *stringSlice) Set(value string) error {
*s = append(*s, value)
return nil
}
func parseFlags(args []string) (flagValues, error) {
fv := flagValues{}
fs := flag.NewFlagSet("aicli", flag.ContinueOnError)
fs.Usage = printUsage
var files stringSlice
var prompts stringSlice
// Input flags
fs.Var(&files, "f", "")
fs.Var(&files, "file", "")
fs.Var(&prompts, "p", "")
fs.Var(&prompts, "prompt", "")
fs.StringVar(&fv.promptFile, "pf", "", "")
fs.StringVar(&fv.promptFile, "prompt-file", "", "")
// System flags
fs.StringVar(&fv.system, "s", "", "")
fs.StringVar(&fv.system, "system", "", "")
fs.StringVar(&fv.systemFile, "sf", "", "")
fs.StringVar(&fv.systemFile, "system-file", "", "")
// API flags
fs.StringVar(&fv.key, "k", "", "")
fs.StringVar(&fv.key, "key", "", "")
fs.StringVar(&fv.keyFile, "kf", "", "")
fs.StringVar(&fv.keyFile, "key-file", "", "")
fs.StringVar(&fv.protocol, "l", "", "")
fs.StringVar(&fv.protocol, "protocol", "", "")
fs.StringVar(&fv.url, "u", "", "")
fs.StringVar(&fv.url, "url", "", "")
// Model flags
fs.StringVar(&fv.model, "m", "", "")
fs.StringVar(&fv.model, "model", "", "")
fs.StringVar(&fv.fallback, "b", "", "")
fs.StringVar(&fv.fallback, "fallback", "", "")
// Output flags
fs.StringVar(&fv.output, "o", "", "")
fs.StringVar(&fv.output, "output", "", "")
fs.StringVar(&fv.config, "c", "", "")
fs.StringVar(&fv.config, "config", "", "")
// Boolean flags
fs.BoolVar(&fv.stdinFile, "F", false, "")
fs.BoolVar(&fv.stdinFile, "stdin-file", false, "")
fs.BoolVar(&fv.quiet, "q", false, "")
fs.BoolVar(&fv.quiet, "quiet", false, "")
fs.BoolVar(&fv.verbose, "v", false, "")
fs.BoolVar(&fv.verbose, "verbose", false, "")
fs.BoolVar(&fv.version, "version", false, "")
if err := fs.Parse(args); err != nil {
return flagValues{}, err
}
fv.files = files
fv.prompts = prompts
return fv, nil
}

268
config/flags_test.go Normal file

@@ -0,0 +1,268 @@
package config
import (
"flag"
"os"
"testing"

"github.com/stretchr/testify/assert"
)
func resetFlags() {
flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
}
func TestParseFlags(t *testing.T) {
tests := []struct {
name string
args []string
want flagValues
}{
{
name: "empty args",
args: []string{},
want: flagValues{},
},
{
name: "single file short flag",
args: []string{"-f", "main.go"},
want: flagValues{files: []string{"main.go"}},
},
{
name: "single file long flag",
args: []string{"--file", "main.go"},
want: flagValues{files: []string{"main.go"}},
},
{
name: "multiple files",
args: []string{"-f", "a.go", "-f", "b.go", "--file", "c.go"},
want: flagValues{files: []string{"a.go", "b.go", "c.go"}},
},
{
name: "single prompt short flag",
args: []string{"-p", "analyze this"},
want: flagValues{prompts: []string{"analyze this"}},
},
{
name: "single prompt long flag",
args: []string{"--prompt", "analyze this"},
want: flagValues{prompts: []string{"analyze this"}},
},
{
name: "multiple prompts",
args: []string{"-p", "first", "-p", "second", "--prompt", "third"},
want: flagValues{prompts: []string{"first", "second", "third"}},
},
{
name: "prompt file",
args: []string{"-pf", "prompt.txt"},
want: flagValues{promptFile: "prompt.txt"},
},
{
name: "prompt file long",
args: []string{"--prompt-file", "prompt.txt"},
want: flagValues{promptFile: "prompt.txt"},
},
{
name: "system short",
args: []string{"-s", "You are helpful"},
want: flagValues{system: "You are helpful"},
},
{
name: "system long",
args: []string{"--system", "You are helpful"},
want: flagValues{system: "You are helpful"},
},
{
name: "system file short",
args: []string{"-sf", "system.txt"},
want: flagValues{systemFile: "system.txt"},
},
{
name: "system file long",
args: []string{"--system-file", "system.txt"},
want: flagValues{systemFile: "system.txt"},
},
{
name: "key short",
args: []string{"-k", "sk-abc123"},
want: flagValues{key: "sk-abc123"},
},
{
name: "key long",
args: []string{"--key", "sk-abc123"},
want: flagValues{key: "sk-abc123"},
},
{
name: "key file short",
args: []string{"-kf", "api.key"},
want: flagValues{keyFile: "api.key"},
},
{
name: "key file long",
args: []string{"--key-file", "api.key"},
want: flagValues{keyFile: "api.key"},
},
{
name: "protocol short",
args: []string{"-l", "ollama"},
want: flagValues{protocol: "ollama"},
},
{
name: "protocol long",
args: []string{"--protocol", "ollama"},
want: flagValues{protocol: "ollama"},
},
{
name: "url short",
args: []string{"-u", "http://localhost:11434"},
want: flagValues{url: "http://localhost:11434"},
},
{
name: "url long",
args: []string{"--url", "http://localhost:11434"},
want: flagValues{url: "http://localhost:11434"},
},
{
name: "model short",
args: []string{"-m", "gpt-4"},
want: flagValues{model: "gpt-4"},
},
{
name: "model long",
args: []string{"--model", "gpt-4"},
want: flagValues{model: "gpt-4"},
},
{
name: "fallback short",
args: []string{"-b", "gpt-3.5-turbo"},
want: flagValues{fallback: "gpt-3.5-turbo"},
},
{
name: "fallback long",
args: []string{"--fallback", "gpt-3.5-turbo"},
want: flagValues{fallback: "gpt-3.5-turbo"},
},
{
name: "output short",
args: []string{"-o", "result.txt"},
want: flagValues{output: "result.txt"},
},
{
name: "output long",
args: []string{"--output", "result.txt"},
want: flagValues{output: "result.txt"},
},
{
name: "config short",
args: []string{"-c", "config.yaml"},
want: flagValues{config: "config.yaml"},
},
{
name: "config long",
args: []string{"--config", "config.yaml"},
want: flagValues{config: "config.yaml"},
},
{
name: "stdin file short",
args: []string{"-F"},
want: flagValues{stdinFile: true},
},
{
name: "stdin file long",
args: []string{"--stdin-file"},
want: flagValues{stdinFile: true},
},
{
name: "quiet short",
args: []string{"-q"},
want: flagValues{quiet: true},
},
{
name: "quiet long",
args: []string{"--quiet"},
want: flagValues{quiet: true},
},
{
name: "verbose short",
args: []string{"-v"},
want: flagValues{verbose: true},
},
{
name: "verbose long",
args: []string{"--verbose"},
want: flagValues{verbose: true},
},
{
name: "version flag",
args: []string{"--version"},
want: flagValues{version: true},
},
{
name: "complex combination",
args: []string{
"-f", "a.go",
"-f", "b.go",
"-p", "first prompt",
"-pf", "prompt.txt",
"-s", "system prompt",
"-k", "key123",
"-m", "gpt-4",
"-b", "gpt-3.5",
"-o", "out.txt",
"-q",
"-v",
},
want: flagValues{
files: []string{"a.go", "b.go"},
prompts: []string{"first prompt"},
promptFile: "prompt.txt",
system: "system prompt",
key: "key123",
model: "gpt-4",
fallback: "gpt-3.5",
output: "out.txt",
quiet: true,
verbose: true,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
resetFlags()
got, err := parseFlags(tt.args)
assert.NoError(t, err)
assert.Equal(t, tt.want, got)
})
}
}
func TestParseFlagsErrors(t *testing.T) {
tests := []struct {
name string
args []string
}{
{
name: "unknown flag",
args: []string{"--unknown"},
},
{
name: "flag without value",
args: []string{"-f"},
},
{
name: "model without value",
args: []string{"-m"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
resetFlags()
_, err := parseFlags(tt.args)
assert.Error(t, err, "parseFlags() should return error for %s", tt.name)
})
}
}

112
config/merge.go Normal file

@@ -0,0 +1,112 @@
package config
import (
"os"
"strings"
)
func mergeSources(flags flagValues, env envValues, file fileValues) ConfigData {
cfg := defaultConfig
// Apply file values
if file.protocol != "" {
cfg.Protocol = parseProtocol(file.protocol)
}
if file.url != "" {
cfg.URL = file.url
}
if file.model != "" {
cfg.Model = file.model
}
if file.fallback != "" {
cfg.FallbackModels = strings.Split(file.fallback, ",")
}
// Apply env values
if env.protocol != "" {
cfg.Protocol = parseProtocol(env.protocol)
}
if env.url != "" {
cfg.URL = env.url
}
if env.model != "" {
cfg.Model = env.model
}
if env.fallback != "" {
cfg.FallbackModels = strings.Split(env.fallback, ",")
}
if env.system != "" {
cfg.SystemPrompt = env.system
}
if env.key != "" {
cfg.APIKey = env.key
}
// Apply flag values
if flags.protocol != "" {
cfg.Protocol = parseProtocol(flags.protocol)
}
if flags.url != "" {
cfg.URL = flags.url
}
if flags.model != "" {
cfg.Model = flags.model
}
if flags.fallback != "" {
cfg.FallbackModels = strings.Split(flags.fallback, ",")
}
if flags.output != "" {
cfg.Output = flags.output
}
cfg.Quiet = flags.quiet
cfg.Verbose = flags.verbose
cfg.StdinAsFile = flags.stdinFile
// Collect input paths
cfg.FilePaths = flags.files
cfg.PromptFlags = flags.prompts
if flags.promptFile != "" {
cfg.PromptPaths = []string{flags.promptFile}
}
// Resolve system prompt (direct > file)
if flags.system != "" {
cfg.SystemPrompt = flags.system
} else if flags.systemFile != "" {
content, err := os.ReadFile(flags.systemFile)
if err == nil {
cfg.SystemPrompt = strings.TrimRight(string(content), "\n")
}
} else if file.systemFile != "" && cfg.SystemPrompt == "" {
content, err := os.ReadFile(file.systemFile)
if err == nil {
cfg.SystemPrompt = strings.TrimRight(string(content), "\n")
}
}
// Resolve API key (direct > file)
if flags.key != "" {
cfg.APIKey = flags.key
} else if flags.keyFile != "" {
content, err := os.ReadFile(flags.keyFile)
if err == nil {
cfg.APIKey = strings.TrimSpace(string(content))
}
} else if cfg.APIKey == "" && file.keyFile != "" {
content, err := os.ReadFile(file.keyFile)
if err == nil {
cfg.APIKey = strings.TrimSpace(string(content))
}
}
return cfg
}
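// parseProtocol assumes the raw string was already validated in BuildConfig;
// any unrecognized value falls back to ProtocolOpenAI rather than erroring here.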
func parseProtocol(s string) APIProtocol {
switch s {
case "ollama":
return ProtocolOllama
default:
return ProtocolOpenAI
}
}

373
config/merge_test.go Normal file

@@ -0,0 +1,373 @@
package config
import (
"testing"

"github.com/stretchr/testify/assert"
)
func TestMergeSources(t *testing.T) {
tests := []struct {
name string
flags flagValues
env envValues
file fileValues
want ConfigData
}{
{
name: "all empty uses defaults",
flags: flagValues{},
env: envValues{},
file: fileValues{},
want: defaultConfig,
},
{
name: "file overrides defaults",
flags: flagValues{},
env: envValues{},
file: fileValues{
protocol: "ollama",
model: "llama3",
},
want: ConfigData{
Protocol: ProtocolOllama,
URL: "https://api.ppq.ai/chat/completions",
Model: "llama3",
FallbackModels: []string{"gpt-4.1-mini"},
},
},
{
name: "env overrides file",
flags: flagValues{},
env: envValues{
model: "gpt-4",
},
file: fileValues{
model: "llama3",
},
want: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "gpt-4",
FallbackModels: []string{"gpt-4.1-mini"},
},
},
{
name: "flags override env",
flags: flagValues{
model: "claude-3",
},
env: envValues{
model: "gpt-4",
},
file: fileValues{},
want: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "claude-3",
FallbackModels: []string{"gpt-4.1-mini"},
},
},
{
name: "full precedence chain",
flags: flagValues{
protocol: "ollama",
quiet: true,
},
env: envValues{
protocol: "openai",
model: "gpt-4",
url: "http://custom.api",
},
file: fileValues{
protocol: "openai",
model: "llama3",
url: "http://file.api",
fallback: "mistral",
},
want: ConfigData{
Protocol: ProtocolOllama,
URL: "http://custom.api",
Model: "gpt-4",
FallbackModels: []string{"mistral"},
Quiet: true,
},
},
{
name: "fallback string split",
flags: flagValues{
fallback: "model1,model2,model3",
},
env: envValues{},
file: fileValues{},
want: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "gpt-4o-mini",
FallbackModels: []string{"model1", "model2", "model3"},
},
},
{
name: "direct key flag",
flags: flagValues{
key: "sk-direct",
},
env: envValues{},
file: fileValues{},
want: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "gpt-4o-mini",
FallbackModels: []string{"gpt-4.1-mini"},
APIKey: "sk-direct",
},
},
{
name: "direct system flag",
flags: flagValues{
system: "You are helpful",
},
env: envValues{},
file: fileValues{},
want: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "gpt-4o-mini",
FallbackModels: []string{"gpt-4.1-mini"},
SystemPrompt: "You are helpful",
},
},
{
name: "file paths collected",
flags: flagValues{
files: []string{"a.go", "b.go"},
prompts: []string{"prompt1", "prompt2"},
promptFile: "prompt.txt",
},
env: envValues{},
file: fileValues{},
want: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "gpt-4o-mini",
FallbackModels: []string{"gpt-4.1-mini"},
FilePaths: []string{"a.go", "b.go"},
PromptFlags: []string{"prompt1", "prompt2"},
PromptPaths: []string{"prompt.txt"},
},
},
{
name: "stdin file flag",
flags: flagValues{
stdinFile: true,
},
env: envValues{},
file: fileValues{},
want: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "gpt-4o-mini",
FallbackModels: []string{"gpt-4.1-mini"},
StdinAsFile: true,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := mergeSources(tt.flags, tt.env, tt.file)
assert.Equal(t, tt.want, got)
})
}
}
func TestMergeSourcesKeyFile(t *testing.T) {
tests := []struct {
name string
flags flagValues
env envValues
file fileValues
want ConfigData
}{
{
name: "key file from flags",
flags: flagValues{
keyFile: "testdata/api.key",
},
env: envValues{},
file: fileValues{},
want: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "gpt-4o-mini",
FallbackModels: []string{"gpt-4.1-mini"},
APIKey: "sk-test-key-123",
},
},
{
name: "key file from file config",
flags: flagValues{},
env: envValues{},
file: fileValues{
keyFile: "testdata/api.key",
},
want: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "gpt-4o-mini",
FallbackModels: []string{"gpt-4.1-mini"},
APIKey: "sk-test-key-123",
},
},
{
name: "direct key overrides key file",
flags: flagValues{
key: "sk-direct",
keyFile: "testdata/api.key",
},
env: envValues{},
file: fileValues{},
want: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "gpt-4o-mini",
FallbackModels: []string{"gpt-4.1-mini"},
APIKey: "sk-direct",
},
},
{
name: "env key overrides file key file",
flags: flagValues{},
env: envValues{
key: "sk-env",
},
file: fileValues{
keyFile: "testdata/api.key",
},
want: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "gpt-4o-mini",
FallbackModels: []string{"gpt-4.1-mini"},
APIKey: "sk-env",
},
},
{
name: "key file with whitespace trimmed",
flags: flagValues{
keyFile: "testdata/api_whitespace.key",
},
env: envValues{},
file: fileValues{},
want: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "gpt-4o-mini",
FallbackModels: []string{"gpt-4.1-mini"},
APIKey: "sk-whitespace-key",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := mergeSources(tt.flags, tt.env, tt.file)
assert.Equal(t, tt.want, got)
})
}
}
func TestMergeSourcesSystemFile(t *testing.T) {
tests := []struct {
name string
flags flagValues
env envValues
file fileValues
want ConfigData
}{
{
name: "system file from flags",
flags: flagValues{
systemFile: "testdata/system.txt",
},
env: envValues{},
file: fileValues{},
want: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "gpt-4o-mini",
FallbackModels: []string{"gpt-4.1-mini"},
SystemPrompt: "You are a helpful assistant.",
},
},
{
name: "system file from file config",
flags: flagValues{},
env: envValues{},
file: fileValues{
systemFile: "testdata/system.txt",
},
want: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "gpt-4o-mini",
FallbackModels: []string{"gpt-4.1-mini"},
SystemPrompt: "You are a helpful assistant.",
},
},
{
name: "direct system overrides system file",
flags: flagValues{
system: "Direct system",
systemFile: "testdata/system.txt",
},
env: envValues{},
file: fileValues{},
want: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "gpt-4o-mini",
FallbackModels: []string{"gpt-4.1-mini"},
SystemPrompt: "Direct system",
},
},
{
name: "env system overrides file system file",
flags: flagValues{},
env: envValues{
system: "System from env",
},
file: fileValues{
systemFile: "testdata/system.txt",
},
want: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "gpt-4o-mini",
FallbackModels: []string{"gpt-4.1-mini"},
SystemPrompt: "System from env",
},
},
{
name: "empty system file",
flags: flagValues{
systemFile: "testdata/system_empty.txt",
},
env: envValues{},
file: fileValues{},
want: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.ppq.ai/chat/completions",
Model: "gpt-4o-mini",
FallbackModels: []string{"gpt-4.1-mini"},
SystemPrompt: "",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := mergeSources(tt.flags, tt.env, tt.file)
assert.Equal(t, tt.want, got)
})
}
}

1
config/testdata/api.key vendored Normal file

@@ -0,0 +1 @@
sk-test-key-123

1
config/testdata/api_whitespace.key vendored Normal file

@@ -0,0 +1 @@
sk-whitespace-key

0
config/testdata/empty.yaml vendored Normal file

3
config/testdata/invalid.yaml vendored Normal file

@@ -0,0 +1,3 @@
protocol: openai
url: [this is not valid yaml syntax
model: gpt-4

2
config/testdata/partial.yaml vendored Normal file

@@ -0,0 +1,2 @@
model: gpt-4
fallback: gpt-3.5-turbo

1
config/testdata/system.txt vendored Normal file

@@ -0,0 +1 @@
You are a helpful assistant.

0
config/testdata/system_empty.txt vendored Normal file

4
config/testdata/unknown_keys.yaml vendored Normal file

@@ -0,0 +1,4 @@
protocol: openai
model: gpt-4
unknown_field: ignored
another_unknown: also_ignored

6
config/testdata/valid.yaml vendored Normal file

@@ -0,0 +1,6 @@
protocol: ollama
url: http://localhost:11434/api/chat
key_file: ~/.aicli_key
model: llama3
fallback: llama2,mistral
system_file: ~/system.txt

71
config/types.go Normal file

@@ -0,0 +1,71 @@
package config
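// APIProtocol identifies the wire format used by the chat endpoint (OpenAI-style or Ollama).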
type APIProtocol int
const (
ProtocolOpenAI APIProtocol = iota
ProtocolOllama
)
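// ConfigData is the fully resolved configuration returned by BuildConfig.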
type ConfigData struct {
// Input
FilePaths []string
PromptFlags []string
PromptPaths []string
StdinAsFile bool
// System
SystemPrompt string
// API
Protocol APIProtocol
URL string
APIKey string
// Models
Model string
FallbackModels []string
// Output
Output string
Quiet bool
Verbose bool
}
type flagValues struct {
files []string
prompts []string
promptFile string
system string
systemFile string
key string
keyFile string
protocol string
url string
model string
fallback string
output string
config string
stdinFile bool
quiet bool
verbose bool
version bool
}
type envValues struct {
protocol string
url string
key string
model string
fallback string
system string
}
type fileValues struct {
protocol string
url string
keyFile string
model string
fallback string
systemFile string
}

17
config/validate.go Normal file

@@ -0,0 +1,17 @@
package config
import (
"fmt"
)
func validateConfig(cfg ConfigData) error {
if cfg.APIKey == "" {
return fmt.Errorf("API key required: use --key, --key-file, AICLI_API_KEY, AICLI_API_KEY_FILE, or key_file in config")
}
if cfg.Protocol != ProtocolOpenAI && cfg.Protocol != ProtocolOllama {
return fmt.Errorf("invalid protocol: must be openai or ollama")
}
return nil
}

74
config/validate_test.go Normal file

@@ -0,0 +1,74 @@
package config
import (
"testing"

"github.com/stretchr/testify/assert"
)
func TestValidateConfig(t *testing.T) {
tests := []struct {
name string
cfg ConfigData
wantErr bool
errMsg string
}{
{
name: "valid config",
cfg: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.openai.com",
Model: "gpt-4",
FallbackModels: []string{"gpt-3.5"},
APIKey: "sk-test123",
},
wantErr: false,
},
{
name: "missing api key",
cfg: ConfigData{
Protocol: ProtocolOpenAI,
URL: "https://api.openai.com",
Model: "gpt-4",
FallbackModels: []string{"gpt-3.5"},
APIKey: "",
},
wantErr: true,
errMsg: "API key required",
},
{
name: "invalid protocol",
cfg: ConfigData{
Protocol: APIProtocol(99),
URL: "https://api.openai.com",
Model: "gpt-4",
FallbackModels: []string{"gpt-3.5"},
APIKey: "sk-test123",
},
wantErr: true,
errMsg: "invalid protocol",
},
{
name: "ollama protocol valid",
cfg: ConfigData{
Protocol: ProtocolOllama,
URL: "http://localhost:11434",
Model: "llama3",
FallbackModels: []string{},
APIKey: "not-used-but-required",
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := validateConfig(tt.cfg)
if tt.wantErr {
assert.Error(t, err)
assert.Contains(t, err.Error(), tt.errMsg)
} else {
assert.NoError(t, err)
}
})
}
}