diff --git a/.goreleaser.yaml b/.goreleaser.yaml
new file mode 100644
index 0000000..1a11f75
--- /dev/null
+++ b/.goreleaser.yaml
@@ -0,0 +1,32 @@
+# Goreleaser configuration version 2
+version: 2
+
+builds:
+  - id: lcg
+    binary: "lcg_{{ .Version }}"
+    goos:
+      - linux
+      - windows
+      - darwin
+    goarch:
+      - amd64
+      - arm64
+    env:
+      - CGO_ENABLED=0
+    ldflags:
+      - -s -w
+      - -X main.version={{.Version}}
+      - -X main.commit={{.Commit}}
+      - -X main.date={{.Date}}
+    main: .
+    dir: .
+
+archives:
+  - id: lcg
+    ids:
+      - lcg
+    formats:
+      - binary
+    name_template: "{{ .Binary }}_{{ .Os }}_{{ .Arch }}"
+    files:
+      - "lcg_{{ .Version }}"
diff --git a/VERSION.txt b/VERSION.txt
index 00f9e39..7f81993 100644
--- a/VERSION.txt
+++ b/VERSION.txt
@@ -1 +1 @@
-v.2.0.25
+v.2.0.26
diff --git a/config/config.go b/config/config.go
index 4585b17..bae1133 100644
--- a/config/config.go
+++ b/config/config.go
@@ -27,6 +27,7 @@ type Config struct {
 	ResultHistory  string
 	NoHistoryEnv   string
 	AllowExecution bool
+	Think          bool
 	Query          string
 	MainFlags      MainFlags
 	Server         ServerConfig
diff --git a/deploy/VERSION.txt b/deploy/VERSION.txt
index 00f9e39..7f81993 100644
--- a/deploy/VERSION.txt
+++ b/deploy/VERSION.txt
@@ -1 +1 @@
-v.2.0.25
+v.2.0.26
diff --git a/gpt/gpt.go b/gpt/gpt.go
index a5c9eff..e479c65 100644
--- a/gpt/gpt.go
+++ b/gpt/gpt.go
@@ -44,11 +44,19 @@ type Chat struct {
 
 type Gpt3Request struct {
 	Model    string      `json:"model"`
-	Stream bool `json:"stream"`
+	Stream   bool        `json:"stream"`
 	Messages []Chat      `json:"messages"`
 	Options  Gpt3Options `json:"options"`
 }
 
+type Gpt3ThinkRequest struct {
+	Model    string      `json:"model"`
+	Stream   bool        `json:"stream"`
+	Think    bool        `json:"think"`
+	Messages []Chat      `json:"messages"`
+	Options  Gpt3Options `json:"options"`
+}
+
 type Gpt3Options struct {
 	Temperature float64 `json:"temperature"`
 }
diff --git a/gpt/providers.go b/gpt/providers.go
index 6a01d67..c33ac57 100644
--- a/gpt/providers.go
+++ b/gpt/providers.go
@@ -200,12 +200,26 @@ func (p *ProxyAPIProvider) Health() error {
 
 // Chat for OllamaProvider
 func (o *OllamaProvider) Chat(messages []Chat) (string, error) {
-	payload := Gpt3Request{
+
+	think := config.AppConfig.Think
+
+	var payload interface{}
+	if think {
+		payload = Gpt3Request{
 		Model:    o.Model,
 		Messages: messages,
-		Stream: false,
+		Stream:   false,
 		Options:  Gpt3Options{o.Temperature},
 	}
+	} else {
+		payload = Gpt3ThinkRequest{
+			Model:    o.Model,
+			Messages: messages,
+			Stream:   false,
+			Think:    false,
+			Options:  Gpt3Options{o.Temperature},
+		}
+	}
 
 	jsonData, err := json.Marshal(payload)
 	if err != nil {
diff --git a/kustomize/configmap.yaml b/kustomize/configmap.yaml
index bb134b1..3d03740 100644
--- a/kustomize/configmap.yaml
+++ b/kustomize/configmap.yaml
@@ -5,7 +5,7 @@ metadata:
   namespace: lcg
 data:
   # Main settings
-  LCG_VERSION: "v.2.0.25"
+  LCG_VERSION: "v.2.0.26"
   LCG_BASE_PATH: "/lcg"
   LCG_SERVER_HOST: "0.0.0.0"
   LCG_SERVER_PORT: "8080"
diff --git a/kustomize/deployment.yaml b/kustomize/deployment.yaml
index e7978d4..0cddd13 100644
--- a/kustomize/deployment.yaml
+++ b/kustomize/deployment.yaml
@@ -5,7 +5,7 @@ metadata:
   namespace: lcg
   labels:
     app: lcg
-    version: v.2.0.25
+    version: v.2.0.26
 spec:
   replicas: 1
   selector:
@@ -18,7 +18,7 @@ spec:
     spec:
       containers:
         - name: lcg
-          image: kuznetcovay/lcg:v.2.0.25
+          image: kuznetcovay/lcg:v.2.0.26
           imagePullPolicy: Always
           ports:
             - containerPort: 8080
diff --git a/kustomize/kustomization.yaml b/kustomize/kustomization.yaml
index 9b4ec2b..6066110 100644
--- a/kustomize/kustomization.yaml
+++ b/kustomize/kustomization.yaml
@@ -15,11 +15,11 @@ resources:
 # Common labels
 # commonLabels:
 #   app: lcg
-#   version: v.2.0.25
+#   version: v.2.0.26
 #   managed-by: kustomize
 
 # Images
 # images:
 #   - name: lcg
 #     newName: kuznetcovay/lcg
-#     newTag: v.2.0.25
+#     newTag: v.2.0.26
diff --git a/main.go b/main.go
index 326dee9..155567d 100644
--- a/main.go
+++ b/main.go
@@ -109,6 +109,7 @@ lcg [options] <command description>
   LCG_PROXY_URL         Proxy URL for the proxy provider (default: /api/v1/protected/sberchat/chat)
   LCG_API_KEY_FILE      File containing the API key (default: .openai_api_key)
   LCG_APP_NAME          Application name (default: Linux Command GPT)
+  LCG_ALLOW_THINK       Ollama only: allow the model to send its reasoning ("1" or "true" = allowed, empty = disallowed). Only meaningful for models that support this: qwen3, deepseek.
 
 History and execution settings:
   LCG_NO_HISTORY        Disable history recording ("1" or "true" = disabled, empty = enabled)
@@ -163,12 +164,18 @@ lcg [options] <command description>
 			Usage:   "Disable writing/updating command history (overrides LCG_NO_HISTORY)",
 			Value:   false,
 		},
+		&cli.BoolFlag{
+			Name:    "think",
+			Aliases: []string{"T"},
+			Usage:   "Allow the model to send its reasoning",
+			Value:   false,
+		},
 		&cli.StringFlag{
 			Name:        "query",
 			Aliases:     []string{"Q"},
 			Usage:       "Query to send to the model",
-			DefaultText: "Hello? what day is it today?",
-			Value:       "Hello? what day is it today?",
+			DefaultText: "Hi! Surprise me with a random Linux command ...",
+			Value:       "Hi! Surprise me with a random Linux command ...",
 		},
 		&cli.StringFlag{
 			Name: "sys",
@@ -216,7 +223,10 @@ lcg [options] <command description>
 		if c.IsSet("model") {
 			config.AppConfig.Model = model
 		}
-
+		config.AppConfig.Think = false
+		if c.IsSet("think") {
+			config.AppConfig.Think = c.Bool("think")
+		}
 		promptID := c.Int("prompt-id")
 		timeout := c.Int("timeout")
 
@@ -1018,7 +1028,7 @@ func printDebugInfo(file, system, commandInput string, timeout int) {
 	fmt.Printf("📁 File: %s\n", file)
 	fmt.Printf("🤖 System prompt: %s\n", system)
 	fmt.Printf("💬 Query: %s\n", commandInput)
-	fmt.Printf("⏱️  Timeout: %d sec\n", timeout)
+	fmt.Printf("⏱️ Timeout: %d sec\n", timeout)
 	fmt.Printf("🌐 Provider: %s\n", config.AppConfig.ProviderType)
 	fmt.Printf("🏠 Host: %s\n", config.AppConfig.Host)
 	fmt.Printf("🧠 Model: %s\n", config.AppConfig.Model)
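A note on the ldflags in `.goreleaser.yaml`: `-X main.version={{.Version}}` (and the commit/date flags) only take effect if `package main` declares matching package-level string variables. A minimal sketch of that wiring, assuming plain string fallbacks; the fallback values and the `printBuildInfo` helper are illustrative, not taken from this diff:

```go
package main

import "fmt"

// Link-time targets for the -X flags in .goreleaser.yaml
// (main.version, main.commit, main.date). The values below are
// fallbacks for a plain `go build`; goreleaser overwrites them
// at link time.
var (
	version = "dev"
	commit  = "none"
	date    = "unknown"
)

// printBuildInfo is a hypothetical helper showing how the
// injected values might be surfaced.
func printBuildInfo() {
	fmt.Printf("lcg %s (commit %s, built %s)\n", version, commit, date)
}

func main() {
	printBuildInfo()
}
```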
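On the `think` plumbing: when `--think`/`LCG_ALLOW_THINK` is enabled, `OllamaProvider.Chat` sends `Gpt3Request`, which has no `think` field, so the model keeps its default reasoning behavior; when disabled, it sends `Gpt3ThinkRequest` with an explicit `"think": false`. Below is a self-contained sketch of the two JSON shapes; the struct definitions are copied from this diff, while the `Chat` fields, model name, and temperature are illustrative assumptions:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Chat's fields are assumed here; lcg's actual definition may differ.
type Chat struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

type Gpt3Options struct {
	Temperature float64 `json:"temperature"`
}

type Gpt3Request struct {
	Model    string      `json:"model"`
	Stream   bool        `json:"stream"`
	Messages []Chat      `json:"messages"`
	Options  Gpt3Options `json:"options"`
}

type Gpt3ThinkRequest struct {
	Model    string      `json:"model"`
	Stream   bool        `json:"stream"`
	Think    bool        `json:"think"`
	Messages []Chat      `json:"messages"`
	Options  Gpt3Options `json:"options"`
}

// buildPayload mirrors the branch added to OllamaProvider.Chat:
// thinking allowed -> no "think" key; disallowed -> "think": false.
func buildPayload(think bool, messages []Chat) interface{} {
	if think {
		return Gpt3Request{Model: "qwen3", Messages: messages, Options: Gpt3Options{Temperature: 0.2}}
	}
	return Gpt3ThinkRequest{Model: "qwen3", Messages: messages, Think: false, Options: Gpt3Options{Temperature: 0.2}}
}

func main() {
	msgs := []Chat{{Role: "user", Content: "list open ports"}}
	for _, allow := range []bool{true, false} {
		b, _ := json.Marshal(buildPayload(allow, msgs))
		fmt.Printf("think=%v -> %s\n", allow, b)
	}
}
```

A single struct with a `Think *bool` field tagged `json:"think,omitempty"` could produce both shapes without the second type, at the cost of a pointer field.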