Compare commits


31 Commits

SHA1  Message  Date  (Author)
01f8adc979  release v1.1.5  2025-08-11 13:10:44 +06:00
e99fe76bef  release v1.1.0  2025-08-11 13:06:14 +06:00
cc242e1192  improoved version  2025-08-11 13:01:27 +06:00
3e1c4594b1  added new proxy llm provider  2025-08-08 17:42:26 +06:00
ec2486ce3d  release v1.0.4  2024-12-05 15:48:53 +06:00
46a0d9e45a  release v1.0.3  2024-12-05 13:25:58 +06:00
12cd3fe6db  moved to cli framework  2024-12-05 13:17:35 +06:00
7136fe4607  before refactor to cli framework  2024-12-05 11:15:38 +06:00
fa0a8565c3  release v1.0.2  2024-12-03 18:00:10 +06:00
8758ab19ef  release v1.0.1  2024-12-03 17:17:04 +06:00
7a40d8d51e  feat: update prompt and model  2024-09-19 00:53:48 -04:00  (asrul10)
d11017d792  Merge pull request #12 from asrul10/feat/read-file ("feat: read file and add to prompt")  2024-02-08 12:00:21 +07:00  (asrul10)
1c4113d0c2  feat: read file and add to prompt  2024-02-08 11:57:25 +07:00  (asrul10)
00b2ea6614  Merge pull request #10 from asrul10/feat/copy-clipboard ("(feat) Copy to clipboard")  2023-12-19 10:12:35 +07:00  (asrul10)
9538b0fed5  feat: copy to clipboard  2023-12-19 10:09:26 +07:00  (asrul10)
5141cb69a3  update  2023-12-19 09:30:22 +07:00  (asrul10)
ae90ef6cfb  Merge pull request #8 from asrul10/feature/regenerate-gpt ("Add a feature to regenerate the result and some minor improvements")  2023-04-09 10:56:43 +07:00  (asrul10)
7f81b1942b  style: update version  2023-04-09 10:54:35 +07:00  (asrul10)
dafcaaff0f  docs: update example result  2023-04-09 10:53:11 +07:00  (asrul10)
fbb68d2a28  feat: add regenerate options  2023-04-09 10:51:26 +07:00  (asrul10)
2d6fef23aa  chore: set default options to (N)o execute  2023-04-09 10:33:53 +07:00  (asrul10)
432bfc61db  feat: add example usage if there is no option  2023-04-09 10:30:13 +07:00  (asrul10)
0e50c8ec04  feat: remove borders to make it easy to copy  2023-04-09 10:25:50 +07:00  (asrul10)
148e1d9420  fix: box result  2023-03-25 22:26:38 +07:00  (asrul10)
952eee1a29  docs: fix link  2023-03-12 16:20:23 +07:00  (asrul10)
c2619a2864  docs: add executable link  2023-03-12 16:13:01 +07:00  (asrul10)
b1166a724d  Merge pull request #1 from asrul10/workflow ("Add github actions for release")  2023-03-12 16:04:27 +07:00  (asrul10)
c6b1474117  chore: add github actions  2023-03-12 16:01:18 +07:00  (asrul10)
b04f7016b8  fix: remove precompiled file  2023-03-12 14:36:12 +07:00  (asrul10)
4f52b5bbad  docs: add LICENSE  2023-03-12 06:48:21 +07:00  (asrul10)
dce4360043  fix: blocking loading when input api key  2023-03-12 06:27:03 +07:00  (asrul10)
22 changed files with 1873 additions and 197 deletions

.gitignore (vendored, 8 changed lines)

@@ -8,4 +8,10 @@
go.work
*.log
lcg
dist/
shell-code/build.env
bin-linux-amd64/*
bin-linux-arm64/*
binaries-for-upload/*
gpt_results
shell-code/jwt.admin.token

.goreleaser.yaml (new file, 33 lines)

@@ -0,0 +1,33 @@
archives:
- format: tar.gz
builds:
- binary: lcg
env:
- CGO_ENABLED=0
goarch:
- amd64
- arm64
- arm
goos:
- linux
- darwin
changelog:
filters:
exclude:
- '^docs:'
- '^test:'
sort: asc
checksum:
name_template: 'checksums.txt'
release:
draft: true
snapshot:
name_template: "{{ incpatch .Version }}-next"
# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj

Dockerfiles/ImageBuild/Dockerfile (new file, 26 lines)

@@ -0,0 +1,26 @@
FROM --platform=${BUILDPLATFORM} golang:1.24.6-alpine3.22 AS builder
ARG TARGETARCH
# RUN apk add git
#&& go install mvdan.cc/garble@latest
WORKDIR /app
COPY . .
RUN echo $BUILDPLATFORM > buildplatform
RUN echo $TARGETARCH > targetarch
RUN GOOS=linux GOARCH=$TARGETARCH go build -ldflags="-w -s" -o /app/go-lcg .
#RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} garble -literals -tiny build -ldflags="-w -s" -o /app/go-lcg .
FROM alpine:latest
WORKDIR /root
# COPY --from=builder /app/buildplatform .
# COPY --from=builder /app/targetarch .
COPY --from=builder /app/go-lcg /root/lcg
ENTRYPOINT ["/root/lcg"]

Dockerfiles/LocalCompile/Dockerfile (new file, 24 lines)

@@ -0,0 +1,24 @@
FROM --platform=${BUILDPLATFORM} golang:1.24.6-alpine3.22 AS build
ARG TARGETOS
ARG TARGETARCH
# RUN apk add git
#&& go install mvdan.cc/garble@latest
WORKDIR /src
ENV CGO_ENABLED=0
COPY go.* .
RUN go mod download
COPY . .
RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -ldflags="-w -s -buildid=" -trimpath -o /out/go-lcg .
# RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} garble -literals -tiny build -ldflags="-w -s" -o /out/go-lcg .
FROM scratch AS bin-unix
COPY --from=build /out/go-lcg /lcg
FROM bin-unix AS bin-linux
FROM bin-unix AS bin-darwin
FROM scratch AS bin-windows
COPY --from=build /out/go-lcg /lcg.exe
FROM bin-${TARGETOS} AS bin

LICENSE (new file, 21 lines)

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2023 asrul10
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

README.md (modified)

@@ -1,8 +1,11 @@
## Linux Command GPT (lcg)
# Linux Command GPT (lcg)
Get Linux commands in natural language with the power of ChatGPT.
### Installation
## Installation
Build from source
```bash
> git clone --depth 1 https://github.com/asrul10/linux-command-gpt.git ~/.linux-command-gpt
> cd ~/.linux-command-gpt
@@ -11,23 +14,89 @@ Build from source
> ln -s ~/.linux-command-gpt/lcg ~/.local/bin
```
### Example Usage
Or you can [download lcg executable file](https://github.com/asrul10/linux-command-gpt/releases)
## Example Usage
```bash
> lcg I want to extract file linux-command-gpt.tar.gz
> lcg I want to extract linux-command-gpt.tar.gz file
Completed in 0.92 seconds
┌────────────────────────────────────┐
tar -xvzf linux-command-gpt.tar.gz
└────────────────────────────────────┘
Are you sure you want to execute the command? (Y/n):
tar -xvzf linux-command-gpt.tar.gz
Do you want to (c)opy, (r)egenerate, or take (N)o action on the command? (c/r/N):
```
```bash
> LCG_PROMPT='Provide full response' LCG_MODEL=codellama:13b lcg 'i need bash script
to execute some command by ssh on some array of hosts'
Completed in 181.16 seconds
Here is a sample Bash script that demonstrates how to execute commands over SSH on an array of hosts:
```bash
#!/bin/bash
hosts=(host1 host2 host3)
for host in "${hosts[@]}"; do
ssh $host "echo 'Hello, world!' > /tmp/hello.txt"
done
```
This script defines an array `hosts` that contains the names of the hosts to connect to. The loop iterates over each element in the array and uses the `ssh` command to execute a simple command on the remote host. In this case, the command is `echo 'Hello, world!' > /tmp/hello.txt`, which writes the string "Hello, world!" to a file called `/tmp/hello.txt`.
You can modify the script to run any command you like by replacing the `echo` command with your desired command. For example, if you want to run a Python script on each host, you could use the following command:
```bash
ssh $host "python /path/to/script.py"
```
This will execute the Python script located at `/path/to/script.py` on the remote host.
You can also modify the script to run multiple commands in a single SSH session by using the `&&` operator to chain the commands together. For example:
```bash
ssh $host "echo 'Hello, world!' > /tmp/hello.txt && python /path/to/script.py"
```
This will execute both the `echo` command and the Python script in a single SSH session.
I hope this helps! Let me know if you have any questions or need further assistance.
Do you want to (c)opy, (r)egenerate, or take (N)o action on the command? (c/r/N):
```
To use the "copy to clipboard" feature, you need to install either the `xclip` or `xsel` package.
### Options
```bash
> lcg [options]
--help output usage information
--version output the version number
--update-key update the API key
--delete-key delete the API key
--help -h output usage information
--version -v output the version number
--file -f read command from file
--update-key -u update the API key
--delete-key -d delete the API key
# ollama example
export LCG_PROVIDER=ollama
export LCG_HOST=http://192.168.87.108:11434/
export LCG_MODEL=codegeex4
lcg "I want to extract linux-command-gpt.tar.gz file"
export LCG_PROVIDER=proxy
export LCG_HOST=http://localhost:8080
export LCG_MODEL=GigaChat-2
export LCG_JWT_TOKEN=your_jwt_token_here
lcg "I want to extract linux-command-gpt.tar.gz file"
lcg health
lcg config
lcg update-jwt
```

VERSION.txt (new file, 1 line)

@@ -0,0 +1 @@
v1.1.5

_main.go (new file, 224 lines)

@@ -0,0 +1,224 @@
// package main
// import (
// _ "embed"
// "fmt"
// "math"
// "os"
// "os/user"
// "path"
// "strings"
// "time"
// "github.com/atotto/clipboard"
// "github.com/direct-dev-ru/linux-command-gpt/gpt"
// "github.com/direct-dev-ru/linux-command-gpt/reader"
// )
// //go:embed VERSION.txt
// var Version string
// var cwd, _ = os.Getwd()
// var (
// HOST = getEnv("LCG_HOST", "http://192.168.87.108:11434/")
// COMPLETIONS = getEnv("LCG_COMPLETIONS_PATH", "api/chat") // relative part of endpoint
// MODEL = getEnv("LCG_MODEL", "codegeex4")
// PROMPT = getEnv("LCG_PROMPT", "Reply with linux command and nothing else. Output with plain response - no need formatting. No need explanation. No need code blocks. No need ` symbols.")
// API_KEY_FILE = getEnv("LCG_API_KEY_FILE", ".openai_api_key")
// RESULT_FOLDER = getEnv("LCG_RESULT_FOLDER", path.Join(cwd, "gpt_results"))
// // HOST = "https://api.openai.com/v1/"
// // COMPLETIONS = "chat/completions"
// // MODEL = "gpt-4o-mini"
// // MODEL = "codellama:13b"
// // This file is created in the user's home directory
// // Example: /home/username/.openai_api_key
// // API_KEY_FILE = ".openai_api_key"
// HELP = `
// Usage: lcg [options]
// --help -h output usage information
// --version -v output the version number
// --file -f read part of command from file or bash feature $(...)
// --update-key -u update the API key
// --delete-key -d delete the API key
// Example Usage: lcg I want to extract linux-command-gpt.tar.gz file
// Example Usage: lcg --file /path/to/file.json I want to print object questions with jq
// Env Vars:
// LCG_HOST - defaults to "http://192.168.87.108:11434/" - endpoint for Ollama or other LLM API
// LCG_COMPLETIONS_PATH -defaults to "api/chat" - relative part of endpoint
// LCG_MODEL - defaults to "codegeex4"
// LCG_PROMPT - defaults to Reply with linux command and nothing else. Output with plain response - no need formatting. No need explanation. No need code blocks.
// LCG_API_KEY_FILE - defaults to ${HOME}/.openai_api_key - file with API key
// LCG_RESULT_FOLDER - defaults to $(pwd)/gpt_results - folder to save results
// `
// VERSION = Version
// CMD_HELP = 100
// CMD_VERSION = 101
// CMD_UPDATE = 102
// CMD_DELETE = 103
// CMD_COMPLETION = 110
// )
// // getEnv retrieves the value of the environment variable `key` or returns `defaultValue` if not set.
// func getEnv(key, defaultValue string) string {
// if value, exists := os.LookupEnv(key); exists {
// return value
// }
// return defaultValue
// }
// func handleCommand(cmd string) int {
// if cmd == "" || cmd == "--help" || cmd == "-h" {
// return CMD_HELP
// }
// if cmd == "--version" || cmd == "-v" {
// return CMD_VERSION
// }
// if cmd == "--update-key" || cmd == "-u" {
// return CMD_UPDATE
// }
// if cmd == "--delete-key" || cmd == "-d" {
// return CMD_DELETE
// }
// return CMD_COMPLETION
// }
// func getCommand(gpt3 gpt.Gpt3, cmd string) (string, float64) {
// gpt3.InitKey()
// s := time.Now()
// done := make(chan bool)
// go func() {
// loadingChars := []rune{'-', '\\', '|', '/'}
// i := 0
// for {
// select {
// case <-done:
// fmt.Printf("\r")
// return
// default:
// fmt.Printf("\rLoading %c", loadingChars[i])
// i = (i + 1) % len(loadingChars)
// time.Sleep(30 * time.Millisecond)
// }
// }
// }()
// r := gpt3.Completions(cmd)
// done <- true
// elapsed := time.Since(s).Seconds()
// elapsed = math.Round(elapsed*100) / 100
// if r == "" {
// return "", elapsed
// }
// return r, elapsed
// }
// func main() {
// currentUser, err := user.Current()
// if err != nil {
// panic(err)
// }
// args := os.Args
// cmd := ""
// file := ""
// if len(args) > 1 {
// start := 1
// if args[1] == "--file" || args[1] == "-f" {
// file = args[2]
// start = 3
// }
// cmd = strings.Join(args[start:], " ")
// }
// if file != "" {
// err := reader.FileToPrompt(&cmd, file)
// if err != nil {
// fmt.Println(err)
// return
// }
// }
// if _, err := os.Stat(RESULT_FOLDER); os.IsNotExist(err) {
// os.MkdirAll(RESULT_FOLDER, 0755)
// }
// h := handleCommand(cmd)
// if h == CMD_HELP {
// fmt.Println(HELP)
// return
// }
// if h == CMD_VERSION {
// fmt.Println(VERSION)
// return
// }
// gpt3 := gpt.Gpt3{
// CompletionUrl: HOST + COMPLETIONS,
// Model: MODEL,
// Prompt: PROMPT,
// HomeDir: currentUser.HomeDir,
// ApiKeyFile: API_KEY_FILE,
// Temperature: 0.01,
// }
// if h == CMD_UPDATE {
// gpt3.UpdateKey()
// return
// }
// if h == CMD_DELETE {
// gpt3.DeleteKey()
// return
// }
// c := "R"
// r := ""
// elapsed := 0.0
// for c == "R" || c == "r" {
// r, elapsed = getCommand(gpt3, cmd)
// c = "N"
// fmt.Printf("Completed in %v seconds\n\n", elapsed)
// fmt.Println(r)
// fmt.Print("\nDo you want to (c)opy, (s)ave to file, (r)egenerate, or take (N)o action on the command? (c/r/N): ")
// fmt.Scanln(&c)
// // no action
// if c == "N" || c == "n" {
// return
// }
// }
// if r == "" {
// return
// }
// // Copy to clipboard
// if c == "C" || c == "c" {
// clipboard.WriteAll(r)
// fmt.Println("\033[33mCopied to clipboard")
// return
// }
// if c == "S" || c == "s" {
// timestamp := time.Now().Format("2006-01-02_15-04-05") // Format: YYYY-MM-DD_HH-MM-SS
// filename := fmt.Sprintf("gpt_request_%s(%s).md", timestamp, gpt3.Model)
// filePath := path.Join(RESULT_FOLDER, filename)
// resultString := fmt.Sprintf("## Prompt:\n\n%s\n\n------------------\n\n## Response:\n\n%s\n\n", cmd+". "+gpt3.Prompt, r)
// os.WriteFile(filePath, []byte(resultString), 0644)
// fmt.Println("\033[33mSaved to file")
// return
// }
// }

go.mod (11 changed lines)

@@ -1,3 +1,12 @@
module github.com/asrul/linux-command-gpt
module github.com/direct-dev-ru/linux-command-gpt
go 1.18
require github.com/atotto/clipboard v0.1.4
require (
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/urfave/cli/v2 v2.27.5
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
)

go.sum (new file, 10 lines)

@@ -0,0 +1,10 @@
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w=
github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=

gpt/gpt.go (modified)

@@ -1,23 +1,40 @@
package gpt
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strings"
)
// ProxySimpleChatRequest is the structure for a simple chat request
type ProxySimpleChatRequest struct {
Message string `json:"message"`
Model string `json:"model,omitempty"`
}
// ProxySimpleChatResponse is the response structure for a simple chat request
type ProxySimpleChatResponse struct {
Response string `json:"response"`
Usage struct {
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
TotalTokens int `json:"total_tokens"`
} `json:"usage,omitempty"`
Model string `json:"model,omitempty"`
Timeout int `json:"timeout_seconds,omitempty"`
}
// Gpt3 is the updated struct with support for multiple providers
type Gpt3 struct {
CompletionUrl string
Provider Provider
Prompt string
Model string
HomeDir string
ApiKeyFile string
ApiKey string
Temperature float64
ProviderType string // "ollama", "proxy"
}
type Chat struct {
@@ -27,7 +44,13 @@ type Chat struct {
type Gpt3Request struct {
Model string `json:"model"`
Stream bool `json:"stream"`
Messages []Chat `json:"messages"`
Options Gpt3Options `json:"options"`
}
type Gpt3Options struct {
Temperature float64 `json:"temperature"`
}
type Gpt3Response struct {
@@ -36,6 +59,20 @@ type Gpt3Response struct {
} `json:"choices"`
}
// OllamaResponse represents the Ollama chat response structure.
type OllamaResponse struct {
Model string `json:"model"`
CreatedAt string `json:"created_at"`
Message Chat `json:"message"`
Done bool `json:"done"`
TotalDuration int64 `json:"total_duration"`
LoadDuration int64 `json:"load_duration"`
PromptEvalCount int64 `json:"prompt_eval_count"`
PromptEvalDuration int64 `json:"prompt_eval_duration"`
EvalCount int64 `json:"eval_count"`
EvalDuration int64 `json:"eval_duration"`
}
func (gpt3 *Gpt3) deleteApiKey() {
filePath := gpt3.HomeDir + string(filepath.Separator) + gpt3.ApiKeyFile
if _, err := os.Stat(filePath); os.IsNotExist(err) {
@@ -88,7 +125,7 @@ func (gpt3 *Gpt3) loadApiKey() bool {
if _, err := os.Stat(apiKeyFile); os.IsNotExist(err) {
return false
}
apiKey, err := ioutil.ReadFile(apiKeyFile)
apiKey, err := os.ReadFile(apiKeyFile)
if err != nil {
return false
}
@@ -114,6 +151,11 @@ func (gpt3 *Gpt3) DeleteKey() {
}
func (gpt3 *Gpt3) InitKey() {
// The ollama and proxy providers do not need an API key
if gpt3.ProviderType == "ollama" || gpt3.ProviderType == "proxy" {
return
}
load := gpt3.loadApiKey()
if load {
return
@@ -124,50 +166,51 @@ func (gpt3 *Gpt3) InitKey() {
gpt3.storeApiKey(apiKey)
}
func (gpt3 *Gpt3) Completions(ask string) string {
req, err := http.NewRequest("POST", gpt3.CompletionUrl, nil)
if err != nil {
panic(err)
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "Bearer "+strings.TrimSpace(gpt3.ApiKey))
// NewGpt3 creates a new GPT instance with the selected provider
func NewGpt3(providerType, host, apiKey, model, prompt string, temperature float64, timeout int) *Gpt3 {
var provider Provider
switch providerType {
case "proxy":
provider = NewProxyAPIProvider(host, apiKey, model, timeout) // apiKey is used as the JWT token
case "ollama":
provider = NewOllamaProvider(host, model, temperature, timeout)
default:
provider = NewOllamaProvider(host, model, temperature, timeout)
}
return &Gpt3{
Provider: provider,
Prompt: prompt,
Model: model,
ApiKey: apiKey,
Temperature: temperature,
ProviderType: providerType,
}
}
// Completions is the updated method with multi-provider support
func (gpt3 *Gpt3) Completions(ask string) string {
messages := []Chat{
{"system", gpt3.Prompt},
{"user", ask},
}
payload := Gpt3Request{
gpt3.Model,
messages,
}
payloadJson, err := json.Marshal(payload)
if err != nil {
panic(err)
}
req.Body = ioutil.NopCloser(bytes.NewBuffer(payloadJson))
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
panic(err)
{"user", ask + ". " + gpt3.Prompt},
}
if resp.StatusCode != http.StatusOK {
fmt.Println(string(body))
response, err := gpt3.Provider.Chat(messages)
if err != nil {
fmt.Printf("Ошибка при выполнении запроса: %v\n", err)
return ""
}
var res Gpt3Response
err = json.Unmarshal(body, &res)
if err != nil {
panic(err)
return response
}
return strings.TrimSpace(res.Choices[0].Message.Content)
// Health checks the provider's availability
func (gpt3 *Gpt3) Health() error {
return gpt3.Provider.Health()
}
// GetAvailableModels returns the list of available models
func (gpt3 *Gpt3) GetAvailableModels() ([]string, error) {
return gpt3.Provider.GetAvailableModels()
}
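A minimal usage sketch of the refactored API, assuming an Ollama server is reachable at the example address below (the host, model, and prompt text are illustrative values, not package defaults):

```go
package main

import (
	"fmt"

	"github.com/direct-dev-ru/linux-command-gpt/gpt"
)

func main() {
	// Wire up a Gpt3 instance backed by the Ollama provider.
	g := gpt.NewGpt3("ollama", "http://localhost:11434", "", "codegeex4",
		"Reply with a linux command and nothing else.", 0.01, 120)

	// Health delegates to the provider's health check endpoint.
	if err := g.Health(); err != nil {
		fmt.Println("provider unavailable:", err)
		return
	}
	// Completions sends the prompt and user request through the provider.
	fmt.Println(g.Completions("list all files sorted by size"))
}
```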

Binary file not shown.

gpt/prompts.go (new file, 203 lines)

@@ -0,0 +1,203 @@
package gpt
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
)
// SystemPrompt represents a system prompt
type SystemPrompt struct {
ID int `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
Content string `json:"content"`
}
// PromptManager manages system prompts
type PromptManager struct {
Prompts []SystemPrompt
ConfigFile string
HomeDir string
}
// NewPromptManager creates a new prompt manager
func NewPromptManager(homeDir string) *PromptManager {
configFile := filepath.Join(homeDir, ".lcg_prompts.json")
pm := &PromptManager{
ConfigFile: configFile,
HomeDir: homeDir,
}
// Load the built-in prompts
pm.loadDefaultPrompts()
// Load user-defined prompts
pm.loadCustomPrompts()
return pm
}
// loadDefaultPrompts loads the built-in prompts
func (pm *PromptManager) loadDefaultPrompts() {
defaultPrompts := []SystemPrompt{
{
ID: 1,
Name: "linux-command",
Description: "Generate Linux commands (default)",
Content: "Reply with linux command and nothing else. Output with plain response - no need formatting. No need explanation. No need code blocks. No need ` symbols.",
},
{
ID: 2,
Name: "linux-command-with-explanation",
Description: "Generate Linux commands with explanation",
Content: "Generate a Linux command and provide a brief explanation of what it does. Format: COMMAND: explanation",
},
{
ID: 3,
Name: "linux-command-safe",
Description: "Generate safe Linux commands",
Content: "Generate a safe Linux command that won't cause data loss or system damage. Reply with linux command and nothing else. Output with plain response - no need formatting.",
},
{
ID: 4,
Name: "linux-command-verbose",
Description: "Generate Linux commands with detailed explanation",
Content: "Generate a Linux command and provide detailed explanation including what each flag does and potential alternatives.",
},
{
ID: 5,
Name: "linux-command-simple",
Description: "Generate simple Linux commands",
Content: "Generate a simple, easy-to-understand Linux command. Avoid complex flags and options when possible.",
},
}
pm.Prompts = defaultPrompts
}
// loadCustomPrompts loads user-defined prompts from the config file
func (pm *PromptManager) loadCustomPrompts() {
if _, err := os.Stat(pm.ConfigFile); os.IsNotExist(err) {
return
}
data, err := os.ReadFile(pm.ConfigFile)
if err != nil {
return
}
var customPrompts []SystemPrompt
if err := json.Unmarshal(data, &customPrompts); err != nil {
return
}
// Append custom prompts with fresh IDs
for i, prompt := range customPrompts {
prompt.ID = len(pm.Prompts) + i + 1
pm.Prompts = append(pm.Prompts, prompt)
}
}
// saveCustomPrompts persists the custom prompts
func (pm *PromptManager) saveCustomPrompts() error {
// Collect the custom prompts (ID > 5)
var customPrompts []SystemPrompt
for _, prompt := range pm.Prompts {
if prompt.ID > 5 {
customPrompts = append(customPrompts, prompt)
}
}
data, err := json.MarshalIndent(customPrompts, "", " ")
if err != nil {
return err
}
return os.WriteFile(pm.ConfigFile, data, 0644)
}
// GetPromptByID returns a prompt by its ID
func (pm *PromptManager) GetPromptByID(id int) (*SystemPrompt, error) {
for _, prompt := range pm.Prompts {
if prompt.ID == id {
return &prompt, nil
}
}
return nil, fmt.Errorf("промпт с ID %d не найден", id)
}
// GetPromptByName returns a prompt by name
func (pm *PromptManager) GetPromptByName(name string) (*SystemPrompt, error) {
for _, prompt := range pm.Prompts {
if strings.EqualFold(prompt.Name, name) {
return &prompt, nil
}
}
return nil, fmt.Errorf("промпт с именем '%s' не найден", name)
}
// ListPrompts prints all available prompts
func (pm *PromptManager) ListPrompts() {
fmt.Println("Available system prompts:")
fmt.Println("ID | Name | Description")
fmt.Println("---+---------------------------+--------------------------------")
for _, prompt := range pm.Prompts {
description := prompt.Description
if len(description) > 80 {
description = description[:77] + "..."
}
fmt.Printf("%-2d | %-25s | %s\n",
prompt.ID,
truncateString(prompt.Name, 25),
description)
}
}
// AddCustomPrompt adds a new custom prompt
func (pm *PromptManager) AddCustomPrompt(name, description, content string) error {
// Make sure the name is unique
for _, prompt := range pm.Prompts {
if strings.EqualFold(prompt.Name, name) {
return fmt.Errorf("промпт с именем '%s' уже существует", name)
}
}
newPrompt := SystemPrompt{
ID: len(pm.Prompts) + 1,
Name: name,
Description: description,
Content: content,
}
pm.Prompts = append(pm.Prompts, newPrompt)
return pm.saveCustomPrompts()
}
// DeleteCustomPrompt removes a user-defined prompt
func (pm *PromptManager) DeleteCustomPrompt(id int) error {
if id <= 5 {
return fmt.Errorf("нельзя удалить предустановленный промпт")
}
for i, prompt := range pm.Prompts {
if prompt.ID == id {
pm.Prompts = append(pm.Prompts[:i], pm.Prompts[i+1:]...)
return pm.saveCustomPrompts()
}
}
return fmt.Errorf("промпт с ID %d не найден", id)
}
// truncateString trims a string to the given length
func truncateString(s string, maxLen int) string {
if len(s) <= maxLen {
return s
}
return s[:maxLen-3] + "..."
}
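A short usage sketch for PromptManager; the prompt name and content below are made-up examples:

```go
package main

import (
	"fmt"
	"os/user"

	"github.com/direct-dev-ru/linux-command-gpt/gpt"
)

func main() {
	u, _ := user.Current()
	pm := gpt.NewPromptManager(u.HomeDir)

	// Add a hypothetical custom prompt; it is persisted to ~/.lcg_prompts.json.
	if err := pm.AddCustomPrompt("docker-only", "Restrict answers to docker",
		"Reply only with docker CLI commands."); err != nil {
		fmt.Println(err)
	}

	// Built-in prompts keep IDs 1-5; custom prompts get the following IDs.
	pm.ListPrompts()

	if p, err := pm.GetPromptByName("docker-only"); err == nil {
		fmt.Println(p.ID, p.Content)
	}
}
```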

gpt/providers.go (new file, 296 lines)

@@ -0,0 +1,296 @@
package gpt
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"time"
)
// Provider is the interface for working with different LLM providers
type Provider interface {
Chat(messages []Chat) (string, error)
Health() error
GetAvailableModels() ([]string, error)
}
// ProxyAPIProvider is the implementation for the proxy API (gin-restapi)
type ProxyAPIProvider struct {
BaseURL string
JWTToken string
Model string
HTTPClient *http.Client
}
// ProxyChatRequest is the request structure for the proxy API
type ProxyChatRequest struct {
Messages []Chat `json:"messages"`
Model string `json:"model,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`
Stream bool `json:"stream,omitempty"`
SystemContent string `json:"system_content,omitempty"`
UserContent string `json:"user_content,omitempty"`
RandomWords []string `json:"random_words,omitempty"`
FallbackString string `json:"fallback_string,omitempty"`
}
// ProxyChatResponse is the response structure from the proxy API
type ProxyChatResponse struct {
Response string `json:"response"`
Usage struct {
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
TotalTokens int `json:"total_tokens"`
} `json:"usage,omitempty"`
Error string `json:"error,omitempty"`
Model string `json:"model,omitempty"`
Timeout int `json:"timeout_seconds,omitempty"`
}
// ProxyHealthResponse is the health check response structure
type ProxyHealthResponse struct {
Status string `json:"status"`
Message string `json:"message"`
Model string `json:"default_model,omitempty"`
Timeout int `json:"default_timeout_seconds,omitempty"`
}
// OllamaProvider is the implementation for the Ollama API
type OllamaProvider struct {
BaseURL string
Model string
Temperature float64
HTTPClient *http.Client
}
// OllamaTagsResponse is the response structure for listing models
type OllamaTagsResponse struct {
Models []struct {
Name string `json:"name"`
ModifiedAt string `json:"modified_at"`
Size int64 `json:"size"`
} `json:"models"`
}
func NewProxyAPIProvider(baseURL, jwtToken, model string, timeout int) *ProxyAPIProvider {
return &ProxyAPIProvider{
BaseURL: strings.TrimSuffix(baseURL, "/"),
JWTToken: jwtToken,
Model: model,
HTTPClient: &http.Client{Timeout: time.Duration(timeout) * time.Second},
}
}
func NewOllamaProvider(baseURL, model string, temperature float64, timeout int) *OllamaProvider {
return &OllamaProvider{
BaseURL: strings.TrimSuffix(baseURL, "/"),
Model: model,
Temperature: temperature,
HTTPClient: &http.Client{Timeout: time.Duration(timeout) * time.Second},
}
}
// Chat implements Provider for ProxyAPIProvider
func (p *ProxyAPIProvider) Chat(messages []Chat) (string, error) {
// Use the primary endpoint /api/v1/protected/sberchat/chat
payload := ProxyChatRequest{
Messages: messages,
Model: p.Model,
Temperature: 0.5,
TopP: 0.5,
Stream: false,
RandomWords: []string{"linux", "command", "gpt"},
FallbackString: "I'm sorry, I can't help with that. Please try again.",
}
jsonData, err := json.Marshal(payload)
if err != nil {
return "", fmt.Errorf("ошибка маршалинга запроса: %w", err)
}
req, err := http.NewRequest("POST", p.BaseURL+"/api/v1/protected/sberchat/chat", bytes.NewBuffer(jsonData))
if err != nil {
return "", fmt.Errorf("ошибка создания запроса: %w", err)
}
req.Header.Set("Content-Type", "application/json")
if p.JWTToken != "" {
req.Header.Set("Authorization", "Bearer "+p.JWTToken)
}
resp, err := p.HTTPClient.Do(req)
if err != nil {
return "", fmt.Errorf("ошибка выполнения запроса: %w", err)
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return "", fmt.Errorf("ошибка чтения ответа: %w", err)
}
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("ошибка API: %d - %s", resp.StatusCode, string(body))
}
var response ProxyChatResponse
if err := json.Unmarshal(body, &response); err != nil {
return "", fmt.Errorf("ошибка парсинга ответа: %w", err)
}
if response.Error != "" {
return "", fmt.Errorf("ошибка прокси API: %s", response.Error)
}
if response.Response == "" {
return "", fmt.Errorf("пустой ответ от API")
}
return strings.TrimSpace(response.Response), nil
}
// Health implements Provider for ProxyAPIProvider
func (p *ProxyAPIProvider) Health() error {
req, err := http.NewRequest("GET", p.BaseURL+"/api/v1/protected/sberchat/health", nil)
if err != nil {
return fmt.Errorf("ошибка создания health check запроса: %w", err)
}
if p.JWTToken != "" {
req.Header.Set("Authorization", "Bearer "+p.JWTToken)
}
resp, err := p.HTTPClient.Do(req)
if err != nil {
return fmt.Errorf("ошибка health check: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("health check failed: %d", resp.StatusCode)
}
var healthResponse ProxyHealthResponse
body, err := io.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("ошибка чтения health check ответа: %w", err)
}
if err := json.Unmarshal(body, &healthResponse); err != nil {
return fmt.Errorf("ошибка парсинга health check ответа: %w", err)
}
if healthResponse.Status != "ok" {
return fmt.Errorf("health check status: %s - %s", healthResponse.Status, healthResponse.Message)
}
return nil
}
// Chat implements Provider for OllamaProvider
func (o *OllamaProvider) Chat(messages []Chat) (string, error) {
payload := Gpt3Request{
Model: o.Model,
Messages: messages,
Stream: false,
Options: Gpt3Options{o.Temperature},
}
jsonData, err := json.Marshal(payload)
if err != nil {
return "", fmt.Errorf("ошибка маршалинга запроса: %w", err)
}
req, err := http.NewRequest("POST", o.BaseURL+"/api/chat", bytes.NewBuffer(jsonData))
if err != nil {
return "", fmt.Errorf("ошибка создания запроса: %w", err)
}
req.Header.Set("Content-Type", "application/json")
resp, err := o.HTTPClient.Do(req)
if err != nil {
return "", fmt.Errorf("ошибка выполнения запроса: %w", err)
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return "", fmt.Errorf("ошибка чтения ответа: %w", err)
}
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("ошибка API: %d - %s", resp.StatusCode, string(body))
}
var response OllamaResponse
if err := json.Unmarshal(body, &response); err != nil {
return "", fmt.Errorf("ошибка парсинга ответа: %w", err)
}
return strings.TrimSpace(response.Message.Content), nil
}
// Health implements Provider for OllamaProvider
func (o *OllamaProvider) Health() error {
req, err := http.NewRequest("GET", o.BaseURL+"/api/tags", nil)
if err != nil {
return fmt.Errorf("ошибка создания health check запроса: %w", err)
}
resp, err := o.HTTPClient.Do(req)
if err != nil {
return fmt.Errorf("ошибка health check: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("health check failed: %d", resp.StatusCode)
}
return nil
}
// GetAvailableModels for ProxyAPIProvider returns a fixed list
func (p *ProxyAPIProvider) GetAvailableModels() ([]string, error) {
return []string{"GigaChat-2", "GigaChat-2-Pro", "GigaChat-2-Max"}, nil
}
// GetAvailableModels returns the list of models available from the Ollama server
func (o *OllamaProvider) GetAvailableModels() ([]string, error) {
req, err := http.NewRequest("GET", o.BaseURL+"/api/tags", nil)
if err != nil {
return nil, fmt.Errorf("ошибка создания запроса: %w", err)
}
resp, err := o.HTTPClient.Do(req)
if err != nil {
return nil, fmt.Errorf("ошибка получения моделей: %w", err)
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("ошибка чтения ответа: %w", err)
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("ошибка API: %d - %s", resp.StatusCode, string(body))
}
var response OllamaTagsResponse
if err := json.Unmarshal(body, &response); err != nil {
return nil, fmt.Errorf("ошибка парсинга ответа: %w", err)
}
var models []string
for _, model := range response.Models {
models = append(models, model.Name)
}
return models, nil
}
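Since both backends sit behind the three-method Provider interface, adding another LLM backend means satisfying just these methods. A hypothetical in-memory stub, useful for tests (not part of the package, only an illustration of the interface contract):

```go
package gpt

// StubProvider is an illustrative Provider implementation that returns a
// canned reply; it exists only to show the interface contract.
type StubProvider struct {
	Reply string
}

// Chat ignores the conversation and returns the canned reply.
func (s *StubProvider) Chat(messages []Chat) (string, error) {
	return s.Reply, nil
}

// Health always reports the stub as healthy.
func (s *StubProvider) Health() error { return nil }

// GetAvailableModels returns a single fake model name.
func (s *StubProvider) GetAvailableModels() ([]string, error) {
	return []string{"stub-model"}, nil
}
```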

main.go (666 changed lines)

@@ -1,148 +1,626 @@
package main
import (
_ "embed"
"fmt"
"math"
"os"
"os/exec"
"os/user"
"path"
"strconv"
"strings"
"time"
"github.com/asrul/linux-command-gpt/gpt"
"github.com/atotto/clipboard"
"github.com/direct-dev-ru/linux-command-gpt/gpt"
"github.com/direct-dev-ru/linux-command-gpt/reader"
"github.com/urfave/cli/v2"
)
//go:embed VERSION.txt
var Version string
var (
cwd, _ = os.Getwd()
HOST = getEnv("LCG_HOST", "http://192.168.87.108:11434/")
COMPLETIONS = getEnv("LCG_COMPLETIONS_PATH", "api/chat")
MODEL = getEnv("LCG_MODEL", "codegeex4")
PROMPT = getEnv("LCG_PROMPT", "Reply with linux command and nothing else. Output with plain response - no need formatting. No need explanation. No need code blocks. No need ` symbols.")
API_KEY_FILE = getEnv("LCG_API_KEY_FILE", ".openai_api_key")
RESULT_FOLDER = getEnv("LCG_RESULT_FOLDER", path.Join(cwd, "gpt_results"))
PROVIDER_TYPE = getEnv("LCG_PROVIDER", "ollama") // "ollama", "proxy"
JWT_TOKEN = getEnv("LCG_JWT_TOKEN", "")
PROMPT_ID = getEnv("LCG_PROMPT_ID", "1") // default prompt ID
TIMEOUT = getEnv("LCG_TIMEOUT", "120") // default timeout in seconds
)
const (
HOST = "https://api.openai.com/v1/"
COMPLETIONS = "chat/completions"
MODEL = "gpt-3.5-turbo"
PROMPT = "I want you to reply with linux command and nothing else. Do not write explanations."
// This file is created in the user's home directory
// Example: /home/username/.openai_api_key
API_KEY_FILE = ".openai_api_key"
HELP = `
Usage: lcg [options]
--help output usage information
--version output the version number
--update-key update the API key
--delete-key delete the API key
`
VERSION = "0.1.0"
CMD_HELP = 100
CMD_VERSION = 101
CMD_UPDATE = 102
CMD_DELETE = 103
CMD_COMPLETION = 110
colorRed = "\033[31m"
colorGreen = "\033[32m"
colorYellow = "\033[33m"
colorBlue = "\033[34m"
colorPurple = "\033[35m"
colorCyan = "\033[36m"
colorReset = "\033[0m"
colorBold = "\033[1m"
)
func handleCommand(cmd string) int {
if cmd == "" || cmd == "--help" || cmd == "-h" {
return CMD_HELP
}
if cmd == "--version" || cmd == "-v" {
return CMD_VERSION
}
if cmd == "--update-key" || cmd == "-u" {
return CMD_UPDATE
}
if cmd == "--delete-key" || cmd == "-d" {
return CMD_DELETE
}
return CMD_COMPLETION
}
func main() {
currentUser, err := user.Current()
if err != nil {
panic(err)
app := &cli.App{
Name: "lcg",
Usage: "Linux Command GPT - Генерация Linux команд из описаний",
Version: Version,
Commands: getCommands(),
UsageText: `
lcg [options] <command description>
Examples:
lcg "I want to extract the linux-command-gpt.tar.gz file"
lcg --file /path/to/file.txt "I want to list all directories using ls"
`,
Description: `
Linux Command GPT is a tool for generating Linux commands from natural-language descriptions.
It supports reading parts of the prompt from files and lets you save, copy, or regenerate results.
Environment variables:
LCG_HOST LLM API endpoint (default: http://192.168.87.108:11434/)
LCG_MODEL Model name (default: codegeex4)
LCG_PROMPT Default prompt text
LCG_PROVIDER Provider type: "ollama" or "proxy" (default: ollama)
LCG_JWT_TOKEN JWT token for the proxy provider
`,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "file",
Aliases: []string{"f"},
Usage: "Read part of the command from a file",
},
&cli.StringFlag{
Name: "sys",
Aliases: []string{"s"},
Usage: "System prompt content or ID",
DefaultText: "Use prompt ID from LCG_PROMPT_ID or default prompt",
Value: "",
},
&cli.IntFlag{
Name: "prompt-id",
Aliases: []string{"pid"},
Usage: "System prompt ID (1-5 for default prompts)",
DefaultText: "1",
Value: 1,
},
&cli.IntFlag{
Name: "timeout",
Aliases: []string{"t"},
Usage: "Request timeout in seconds",
DefaultText: "120",
Value: 120,
},
},
Action: func(c *cli.Context) error {
file := c.String("file")
system := c.String("sys")
promptID := c.Int("prompt-id")
timeout := c.Int("timeout")
args := c.Args().Slice()
if len(args) == 0 {
cli.ShowAppHelp(c)
showTips()
return nil
}
args := os.Args
cmd := ""
if len(args) > 1 {
cmd = strings.Join(args[1:], " ")
// If a prompt-id was supplied, load the matching prompt
if system == "" && promptID > 0 {
currentUser, _ := user.Current()
pm := gpt.NewPromptManager(currentUser.HomeDir)
if prompt, err := pm.GetPromptByID(promptID); err == nil {
system = prompt.Content
} else {
fmt.Printf("Warning: Prompt ID %d not found, using default prompt\n", promptID)
}
h := handleCommand(cmd)
if h == CMD_HELP {
fmt.Println(HELP)
return
}
if h == CMD_VERSION {
fmt.Println(VERSION)
return
executeMain(file, system, strings.Join(args, " "), timeout)
return nil
},
}
gpt3 := gpt.Gpt3{
CompletionUrl: HOST + COMPLETIONS,
Model: MODEL,
Prompt: PROMPT,
HomeDir: currentUser.HomeDir,
ApiKeyFile: API_KEY_FILE,
cli.VersionFlag = &cli.BoolFlag{
Name: "version",
Aliases: []string{"V", "v"},
Usage: "prints out version",
}
cli.VersionPrinter = func(cCtx *cli.Context) {
fmt.Printf("%s\n", cCtx.App.Version)
}
if h == CMD_UPDATE {
if err := app.Run(os.Args); err != nil {
fmt.Println("Error:", err)
os.Exit(1)
}
}
func getCommands() []*cli.Command {
return []*cli.Command{
{
Name: "update-key",
Aliases: []string{"u"},
Usage: "Update the API key",
Action: func(c *cli.Context) error {
if PROVIDER_TYPE == "ollama" || PROVIDER_TYPE == "proxy" {
fmt.Println("API key is not needed for ollama and proxy providers")
return nil
}
timeout := 120 // default timeout
if t, err := strconv.Atoi(TIMEOUT); err == nil {
timeout = t
}
gpt3 := initGPT(PROMPT, timeout)
gpt3.UpdateKey()
return
fmt.Println("API key updated.")
return nil
},
},
{
Name: "delete-key",
Aliases: []string{"d"},
Usage: "Delete the API key",
Action: func(c *cli.Context) error {
if PROVIDER_TYPE == "ollama" || PROVIDER_TYPE == "proxy" {
fmt.Println("API key is not needed for ollama and proxy providers")
return nil
}
if h == CMD_DELETE {
timeout := 120 // default timeout
if t, err := strconv.Atoi(TIMEOUT); err == nil {
timeout = t
}
gpt3 := initGPT(PROMPT, timeout)
gpt3.DeleteKey()
fmt.Println("API key deleted.")
return nil
},
},
{
Name: "update-jwt",
Aliases: []string{"j"},
Usage: "Update the JWT token for proxy API",
Action: func(c *cli.Context) error {
if PROVIDER_TYPE != "proxy" {
fmt.Println("JWT token is only needed for proxy provider")
return nil
}
var jwtToken string
fmt.Print("JWT Token: ")
fmt.Scanln(&jwtToken)
currentUser, _ := user.Current()
jwtFile := currentUser.HomeDir + "/.proxy_jwt_token"
if err := os.WriteFile(jwtFile, []byte(strings.TrimSpace(jwtToken)), 0600); err != nil {
fmt.Printf("Ошибка сохранения JWT токена: %v\n", err)
return err
}
fmt.Println("JWT token updated.")
return nil
},
},
{
Name: "delete-jwt",
Aliases: []string{"dj"},
Usage: "Delete the JWT token for proxy API",
Action: func(c *cli.Context) error {
if PROVIDER_TYPE != "proxy" {
fmt.Println("JWT token is only needed for proxy provider")
return nil
}
currentUser, _ := user.Current()
jwtFile := currentUser.HomeDir + "/.proxy_jwt_token"
if err := os.Remove(jwtFile); err != nil && !os.IsNotExist(err) {
fmt.Printf("Ошибка удаления JWT токена: %v\n", err)
return err
}
fmt.Println("JWT token deleted.")
return nil
},
},
{
Name: "models",
Aliases: []string{"m"},
Usage: "Show available models",
Action: func(c *cli.Context) error {
timeout := 120 // default timeout
if t, err := strconv.Atoi(TIMEOUT); err == nil {
timeout = t
}
gpt3 := initGPT(PROMPT, timeout)
models, err := gpt3.GetAvailableModels()
if err != nil {
fmt.Printf("Ошибка получения моделей: %v\n", err)
return err
}
fmt.Printf("Доступные модели для провайдера %s:\n", PROVIDER_TYPE)
for i, model := range models {
fmt.Printf(" %d. %s\n", i+1, model)
}
return nil
},
},
{
Name: "health",
Aliases: []string{"he"}, // Изменено с "h" на "he"
Usage: "Check API health",
Action: func(c *cli.Context) error {
timeout := 120 // default timeout
if t, err := strconv.Atoi(TIMEOUT); err == nil {
timeout = t
}
gpt3 := initGPT(PROMPT, timeout)
if err := gpt3.Health(); err != nil {
fmt.Printf("Health check failed: %v\n", err)
return err
}
fmt.Println("API is healthy.")
return nil
},
},
{
Name: "config",
Aliases: []string{"co"}, // Изменено с "c" на "co"
Usage: "Show current configuration",
Action: func(c *cli.Context) error {
fmt.Printf("Provider: %s\n", PROVIDER_TYPE)
fmt.Printf("Host: %s\n", HOST)
fmt.Printf("Model: %s\n", MODEL)
fmt.Printf("Prompt: %s\n", PROMPT)
fmt.Printf("Timeout: %s seconds\n", TIMEOUT)
if PROVIDER_TYPE == "proxy" {
fmt.Printf("JWT Token: %s\n", func() string {
if JWT_TOKEN != "" {
return "***set***"
}
currentUser, _ := user.Current()
jwtFile := currentUser.HomeDir + "/.proxy_jwt_token"
if _, err := os.Stat(jwtFile); err == nil {
return "***from file***"
}
return "***not set***"
}())
}
return nil
},
},
{
Name: "history",
Aliases: []string{"hist"},
Usage: "Show command history",
Action: func(c *cli.Context) error {
showHistory()
return nil
},
},
{
Name: "prompts",
Aliases: []string{"p"},
Usage: "Manage system prompts",
Subcommands: []*cli.Command{
{
Name: "list",
Aliases: []string{"l"},
Usage: "List all available prompts",
Action: func(c *cli.Context) error {
currentUser, _ := user.Current()
pm := gpt.NewPromptManager(currentUser.HomeDir)
pm.ListPrompts()
return nil
},
},
{
Name: "add",
Aliases: []string{"a"},
Usage: "Add a new custom prompt",
Action: func(c *cli.Context) error {
currentUser, _ := user.Current()
pm := gpt.NewPromptManager(currentUser.HomeDir)
var name, description, content string
fmt.Print("Название промпта: ")
fmt.Scanln(&name)
fmt.Print("Описание: ")
fmt.Scanln(&description)
fmt.Print("Содержание промпта: ")
fmt.Scanln(&content)
if err := pm.AddCustomPrompt(name, description, content); err != nil {
fmt.Printf("Ошибка добавления промпта: %v\n", err)
return err
}
fmt.Println("Промпт успешно добавлен!")
return nil
},
},
{
Name: "delete",
Aliases: []string{"d"},
Usage: "Delete a custom prompt",
Action: func(c *cli.Context) error {
if c.NArg() == 0 {
fmt.Println("Укажите ID промпта для удаления")
return nil
}
var id int
if _, err := fmt.Sscanf(c.Args().First(), "%d", &id); err != nil {
fmt.Println("Неверный ID промпта")
return err
}
currentUser, _ := user.Current()
pm := gpt.NewPromptManager(currentUser.HomeDir)
if err := pm.DeleteCustomPrompt(id); err != nil {
fmt.Printf("Ошибка удаления промпта: %v\n", err)
return err
}
fmt.Println("Промпт успешно удален!")
return nil
},
},
},
},
{
Name: "test-prompt",
Aliases: []string{"tp"},
Usage: "Test a specific prompt ID",
Action: func(c *cli.Context) error {
if c.NArg() == 0 {
fmt.Println("Usage: lcg test-prompt <prompt-id> <command>")
return nil
}
var promptID int
if _, err := fmt.Sscanf(c.Args().First(), "%d", &promptID); err != nil {
fmt.Println("Invalid prompt ID")
return err
}
currentUser, _ := user.Current()
pm := gpt.NewPromptManager(currentUser.HomeDir)
prompt, err := pm.GetPromptByID(promptID)
if err != nil {
fmt.Printf("Prompt ID %d not found\n", promptID)
return err
}
fmt.Printf("Testing prompt ID %d: %s\n", promptID, prompt.Name)
fmt.Printf("Description: %s\n", prompt.Description)
fmt.Printf("Content: %s\n", prompt.Content)
if len(c.Args().Slice()) > 1 {
command := strings.Join(c.Args().Slice()[1:], " ")
fmt.Printf("\nTesting with command: %s\n", command)
timeout := 120 // default timeout
if t, err := strconv.Atoi(TIMEOUT); err == nil {
timeout = t
}
executeMain("", prompt.Content, command, timeout)
}
return nil
},
},
}
}
func executeMain(file, system, commandInput string, timeout int) {
if file != "" {
if err := reader.FileToPrompt(&commandInput, file); err != nil {
printColored(fmt.Sprintf("❌ Ошибка чтения файла: %v\n", err), colorRed)
return
}
}
// If system is empty, fall back to the default prompt
if system == "" {
system = PROMPT
}
if _, err := os.Stat(RESULT_FOLDER); os.IsNotExist(err) {
if err := os.MkdirAll(RESULT_FOLDER, 0755); err != nil {
printColored(fmt.Sprintf("❌ Ошибка создания папки результатов: %v\n", err), colorRed)
return
}
}
gpt3 := initGPT(system, timeout)
printColored("🤖 Запрос: ", colorCyan)
fmt.Printf("%s\n", commandInput)
response, elapsed := getCommand(gpt3, commandInput)
if response == "" {
printColored("❌ Ответ не получен. Проверьте подключение к API.\n", colorRed)
return
}
s := time.Now()
printColored(fmt.Sprintf("✅ Выполнено за %.2f сек\n", elapsed), colorGreen)
printColored("\n📋 Команда:\n", colorYellow)
printColored(fmt.Sprintf(" %s\n\n", response), colorBold+colorGreen)
saveToHistory(commandInput, response)
handlePostResponse(response, gpt3, system, commandInput)
}
func initGPT(system string, timeout int) gpt.Gpt3 {
currentUser, _ := user.Current()
// Load the JWT token depending on the provider
var jwtToken string
if PROVIDER_TYPE == "proxy" {
jwtToken = JWT_TOKEN
if jwtToken == "" {
// Try to load it from a file
jwtFile := currentUser.HomeDir + "/.proxy_jwt_token"
if data, err := os.ReadFile(jwtFile); err == nil {
jwtToken = strings.TrimSpace(string(data))
}
}
}
return *gpt.NewGpt3(PROVIDER_TYPE, HOST, jwtToken, MODEL, system, 0.01, timeout)
}
func getCommand(gpt3 gpt.Gpt3, cmd string) (string, float64) {
gpt3.InitKey()
start := time.Now()
done := make(chan bool)
go func() {
loadingChars := []rune{'-', '\\', '|', '/'}
loadingChars := []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
i := 0
for {
select {
case <-done:
fmt.Printf("\r")
fmt.Printf("\r%s", strings.Repeat(" ", 50))
fmt.Print("\r")
return
default:
fmt.Printf("\rLoading %c", loadingChars[i])
fmt.Printf("\r%s Обрабатываю запрос...", loadingChars[i])
i = (i + 1) % len(loadingChars)
time.Sleep(30 * time.Millisecond)
time.Sleep(100 * time.Millisecond)
}
}
}()
gpt3.InitKey()
r := gpt3.Completions(cmd)
response := gpt3.Completions(cmd)
done <- true
if r == "" {
elapsed := math.Round(time.Since(start).Seconds()*100) / 100
return response, elapsed
}
func handlePostResponse(response string, gpt3 gpt.Gpt3, system, cmd string) {
fmt.Printf("Действия: (c)копировать, (s)сохранить, (r)перегенерировать, (e)выполнить, (n)ничего: ")
var choice string
fmt.Scanln(&choice)
switch strings.ToLower(choice) {
case "c":
clipboard.WriteAll(response)
fmt.Println("✅ Команда скопирована в буфер обмена")
case "s":
saveResponse(response, gpt3, cmd)
case "r":
fmt.Println("🔄 Перегенерирую...")
executeMain("", system, cmd, 120) // Use default timeout for regeneration
case "e":
executeCommand(response)
default:
fmt.Println(" До свидания!")
}
}
func saveResponse(response string, gpt3 gpt.Gpt3, cmd string) {
timestamp := time.Now().Format("2006-01-02_15-04-05")
filename := fmt.Sprintf("gpt_request_%s_%s.md", gpt3.Model, timestamp)
filePath := path.Join(RESULT_FOLDER, filename)
content := fmt.Sprintf("## Prompt:\n\n%s\n\n## Response:\n\n%s\n", cmd+". "+gpt3.Prompt, response)
if err := os.WriteFile(filePath, []byte(content), 0644); err != nil {
fmt.Println("Failed to save response:", err)
} else {
fmt.Printf("Response saved to %s\n", filePath)
}
}
func executeCommand(command string) {
fmt.Printf("🚀 Выполняю: %s\n", command)
fmt.Print("Продолжить? (y/N): ")
var confirm string
fmt.Scanln(&confirm)
if strings.ToLower(confirm) == "y" || strings.ToLower(confirm) == "yes" {
cmd := exec.Command("bash", "-c", command)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
fmt.Printf("❌ Ошибка выполнения: %v\n", err)
} else {
fmt.Println("✅ Команда выполнена успешно")
}
} else {
fmt.Println("❌ Выполнение отменено")
}
}
func getEnv(key, defaultValue string) string {
if value, exists := os.LookupEnv(key); exists {
return value
}
return defaultValue
}
type CommandHistory struct {
Command string
Response string
Timestamp time.Time
}
var commandHistory []CommandHistory
func saveToHistory(cmd, response string) {
commandHistory = append(commandHistory, CommandHistory{
Command: cmd,
Response: response,
Timestamp: time.Now(),
})
// Cap the history at 100 commands
if len(commandHistory) > 100 {
commandHistory = commandHistory[1:]
}
}
func showHistory() {
if len(commandHistory) == 0 {
printColored("📝 История пуста\n", colorYellow)
return
}
c := "Y"
elapsed := time.Since(s).Seconds()
elapsed = math.Round(elapsed*100) / 100
fmt.Printf("Completed in %v seconds\n", elapsed)
fmt.Printf("┌%s┐\n", strings.Repeat("─", len(r)+2))
fmt.Printf("│ %s │\n", r)
fmt.Printf("└%s┘\n", strings.Repeat("─", len(r)+2))
fmt.Print("Are you sure you want to execute the command? (Y/n): ")
fmt.Scanln(&c)
if c != "Y" && c != "y" {
return
printColored("📝 История команд:\n", colorYellow)
for i, hist := range commandHistory {
fmt.Printf("%d. %s → %s (%s)\n",
i+1,
hist.Command,
hist.Response,
hist.Timestamp.Format("15:04:05"))
}
}
cmsplit := strings.Split(r, " ")
cm := exec.Command(cmsplit[0], cmsplit[1:]...)
out, err := cm.Output()
if err != nil {
fmt.Println(err.Error())
return
func printColored(text, color string) {
fmt.Printf("%s%s%s", color, text, colorReset)
}
fmt.Println(string(out))
func showTips() {
printColored("💡 Подсказки:\n", colorCyan)
fmt.Println(" • Используйте --file для чтения из файла")
fmt.Println(" • Используйте --sys для изменения системного промпта")
fmt.Println(" • Используйте --prompt-id для выбора предустановленного промпта")
fmt.Println(" • Используйте --timeout для установки таймаута запроса")
fmt.Println(" • Команда 'prompts list' покажет все доступные промпты")
fmt.Println(" • Команда 'history' покажет историю запросов")
fmt.Println(" • Команда 'config' покажет текущие настройки")
fmt.Println(" • Команда 'health' проверит доступность API")
}

main_test.go (modified)

@@ -1,33 +1 @@
package main
import (
"testing"
)
func TestHandleCommand(t *testing.T) {
tests := []struct {
command string
expected int
}{
{"", CMD_HELP},
{"--help", CMD_HELP},
{"-h", CMD_HELP},
{"--version", CMD_VERSION},
{"-v", CMD_VERSION},
{"--update-key", CMD_UPDATE},
{"-u", CMD_UPDATE},
{"--delete-key", CMD_DELETE},
{"-d", CMD_DELETE},
{"random strings", CMD_COMPLETION},
{"--test", CMD_COMPLETION},
{"-test", CMD_COMPLETION},
{"how to extract test.tar.gz", CMD_COMPLETION},
}
for _, test := range tests {
result := handleCommand(test.command)
if result != test.expected {
t.Error("Expected", test.expected, "got", result)
}
}
}
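The table test above was removed together with handleCommand when dispatch moved to urfave/cli. A minimal replacement sketch, assuming it lives in package main next to getCommands:

```go
package main

import "testing"

// TestGetCommands checks that the expected top-level commands are still
// registered after the urfave/cli refactor.
func TestGetCommands(t *testing.T) {
	want := map[string]bool{
		"update-key": false, "delete-key": false, "update-jwt": false,
		"delete-jwt": false, "models": false, "health": false,
		"config": false, "history": false, "prompts": false,
		"test-prompt": false,
	}
	for _, cmd := range getCommands() {
		if _, ok := want[cmd.Name]; ok {
			want[cmd.Name] = true
		}
	}
	for name, seen := range want {
		if !seen {
			t.Errorf("command %q not registered", name)
		}
	}
}
```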

reader/file.go (new file, 24 lines)

@@ -0,0 +1,24 @@
package reader
import (
"bufio"
"os"
)
func FileToPrompt(cmd *string, filePath string) error {
f, err := os.Open(filePath)
if err != nil {
return err
}
defer f.Close()
reader := bufio.NewReader(f)
*cmd = *cmd + "\nFile path: " + filePath + "\n"
for {
line, err := reader.ReadString('\n')
if err != nil {
break
}
*cmd = *cmd + "\n" + line
}
return nil
}
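FileToPrompt appends the file path and the file's contents to the prompt string. Note that the read loop stops at the first error, so a final line without a trailing newline is not appended. A small usage sketch (the file name is an example):

```go
package main

import (
	"fmt"

	"github.com/direct-dev-ru/linux-command-gpt/reader"
)

func main() {
	// Start from the user's question, then append an example file so the
	// model can see the data it is being asked about.
	prompt := "print all question objects with jq"
	if err := reader.FileToPrompt(&prompt, "questions.json"); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(prompt)
}
```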

shell-code/build-full.sh (new file, 134 lines)

@@ -0,0 +1,134 @@
#!/bin/bash
# Enable strict mode for better debugging
set -euo pipefail
# Configuration
readonly REPO="kuznetcovay/go-lcg"
readonly BRANCH="main"
readonly BINARY_NAME="lcg"
# Take the version from the first argument or fall back to the default
VERSION="${1:-v1.1.0}"
# Colors for output
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m' # No Color
# Logging helpers
log_info() {
echo -e "${GREEN}[INFO]${NC} $1"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Error handler
handle_error() {
local exit_code=$?
log_error "Script failed (exit code: $exit_code)"
exit $exit_code
}
# Restore the original branch
restore_branch() {
if [[ -n "${CURRENT_BRANCH:-}" ]]; then
log_info "Restoring original branch: ${CURRENT_BRANCH}"
git checkout "${CURRENT_BRANCH}" || log_warn "Could not switch back to ${CURRENT_BRANCH}"
fi
}
# Build a binary for a single platform
build_binary() {
local platform=$1
local output_dir="bin-linux-${platform}"
local dockerfile="Dockerfiles/LocalCompile/Dockerfile"
log_info "Building for ${platform}..."
if docker build -f "$dockerfile" --target bin-linux --output "$output_dir/" --platform "linux/${platform}" .; then
cp "$output_dir/$BINARY_NAME" "binaries-for-upload/$BINARY_NAME.${platform}.${VERSION}"
log_info "Build for ${platform} finished successfully"
else
log_error "Build for ${platform} failed"
return 1
fi
}
# Git operations
git_operations() {
log_info "Running git operations..."
git add -A . || { log_error "git add failed"; return 1; }
git commit -m "release $VERSION" || { log_error "git commit failed"; return 1; }
git tag -a "$VERSION" -m "release $VERSION" || { log_error "git tag failed"; return 1; }
git push -u origin main --tags || { log_error "git push failed"; return 1; }
log_info "Git operations finished successfully"
}
# Main
main() {
log_info "Starting build of version: $VERSION"
# Write the version to the version file
echo "$VERSION" > VERSION.txt
# Configure the Go build cache
export GOCACHE="${HOME}/.cache/go-build"
# Remember the current branch
CURRENT_BRANCH=$(git branch --show-current)
# Install error handlers
trap handle_error ERR
trap restore_branch EXIT
# Switch to the target branch if necessary
if [[ "$CURRENT_BRANCH" != "$BRANCH" ]]; then
log_info "Switching to branch: $BRANCH"
git checkout "$BRANCH"
fi
# Fetch tags
log_info "Fetching tags from the remote repository..."
git fetch --tags
# Abort if the tag already exists
if git rev-parse "refs/tags/${VERSION}" >/dev/null 2>&1; then
log_error "Tag ${VERSION} already exists. Aborting."
exit 1
fi
# Create the directory for binaries
mkdir -p binaries-for-upload
# Build binaries for both platforms
build_binary "amd64"
build_binary "arm64"
# Build and push Docker images
log_info "Building and pushing multi-platform Docker images..."
if docker buildx build -f Dockerfiles/ImageBuild/Dockerfile --push --platform linux/amd64,linux/arm64 -t "${REPO}:${VERSION}" .; then
log_info "Docker images built and pushed successfully"
else
log_error "Docker image build failed"
exit 1
fi
# Run git operations
git_operations
log_info "Build of version $VERSION finished successfully!"
}
# Run the main function
main "$@"

shell script (new file, 8 lines; name not shown)

@@ -0,0 +1,8 @@
#!/bin/bash
docker build -f Dockerfiles/LocalCompile/Dockerfile --target bin-linux --output bin-linux-amd64/ --platform linux/amd64 .
docker build -f Dockerfiles/LocalCompile/Dockerfile --target bin-linux --output bin-linux-arm64/ --platform linux/arm64 .
# in linux setuid
# sudo chown root:root bin-linux/lcg
# sudo chmod +s bin-linux/lcg

shell script (new file, 41 lines; name not shown)

@@ -0,0 +1,41 @@
#!/bin/bash
REPO=kuznetcovay/go-lcg
VERSION=$1
if [ -z "$VERSION" ]; then
VERSION=v1.0.8
fi
BRANCH=main
echo "${VERSION}" > VERSION.txt
export GOCACHE="${HOME}/.cache/go-build"
# Save the current branch
CURRENT_BRANCH=$(git branch --show-current)
# Function to restore the original branch
function restore_branch {
echo "Restoring original branch: ${CURRENT_BRANCH}"
git checkout "${CURRENT_BRANCH}"
}
# Check if the current branch is different from the target branch
if [ "$CURRENT_BRANCH" != "$BRANCH" ]; then
# Set a trap to restore the branch on exit
trap restore_branch EXIT
echo "Switching to branch: ${BRANCH}"
git checkout ${BRANCH}
fi
# Run go tests
if ! go test -v -run=^Test; then
echo "Tests failed. Exiting..."
exit 1
fi
# Push multi-platform images
docker buildx build --push --platform linux/amd64,linux/arm64 -t ${REPO}:"${VERSION}" . ||
{
echo "docker buildx build --push failed. Exiting with code 1."
exit 1
}

shell script (new file, 6 lines; name not shown)

@@ -0,0 +1,6 @@
#!/usr/bin/bash
# shellcheck disable=SC2034
LCG_PROVIDER=proxy LCG_HOST=http://localhost:8080 LCG_MODEL=GigaChat-2-Max LCG_JWT_TOKEN=$(go-ansible-vault -a -i shell-code/jwt.admin.token get -m 'JWT_TOKEN' -q) go run . $1 $2 $3 $4 $5 $6 $7 $8 $9
LCG_PROVIDER=proxy LCG_HOST=https://direct-dev.ru LCG_MODEL=GigaChat-2-Max LCG_JWT_TOKEN=$(go-ansible-vault --key $(cat ~/.config/gak) -i ~/.config/jwt.direct-dev.ru get -m 'JWT_TOKEN' -q) go run . [your question here]

shell script (new file, 52 lines; name not shown)

@@ -0,0 +1,52 @@
#!/bin/bash
# Variables
VERSION_FILE="VERSION.txt"
GITHUB_TOKEN="${GITHUB_TOKEN}" # Replace with your GitHub token
REPO="direct-dev-ru/binaries" # Replace with your GitHub username/repo
TAG=lcg.$(cat "$VERSION_FILE")
echo TAG: $TAG
RELEASE_DIR="/home/su/projects/golang/linux-command-gpt/binaries-for-upload"
body="{\"tag_name\":\"${TAG}\", \"target_commitish\":\"main\", \"name\":\"${TAG}\", \
\"body\":\"${TAG}\", \"draft\":false, \"prerelease\":false, \"generate_release_notes\":false}"
echo BODY: $body
response=$(curl -L -X POST \
-H "Accept: application/vnd.github+json" \
-H "Authorization: Bearer ${GITHUB_TOKEN}" \
-H "X-GitHub-Api-Version: 2022-11-28" \
https://api.github.com/repos/direct-dev-ru/binaries/releases \
-d "$body")
echo $response
# Extract the upload URL from the response
upload_url=$(echo "$response" | jq -r '.upload_url' | sed "s/{?name,label}//")
# Check if the release was created successfully
if [[ "$response" == *"Not Found"* ]]; then
echo "Error: Repository not found or invalid token."
exit 1
fi
# Upload each binary file
for file in "$RELEASE_DIR"/*; do
if [[ -f "$file" ]]; then
filename=$(basename "$file")
echo "Uploading $filename..."
response=$(curl -s -X POST -H "Authorization: token $GITHUB_TOKEN" \
-H "Content-Type: application/octet-stream" \
"$upload_url?name=$filename" \
--data-binary @"$file")
echo $response
fi
done
echo "All binaries uploaded successfully."