Skip to content

Commit

Permalink
feat: use entire file in completion prompt
Browse files Browse the repository at this point in the history
  • Loading branch information
Leon committed Jan 24, 2024
1 parent 49690bc commit c540996
Show file tree
Hide file tree
Showing 7 changed files with 37 additions and 37 deletions.
3 changes: 2 additions & 1 deletion .env.example
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
OPENAI_MODEL=gpt-3.5-turbo
OPENAI_MODEL=gpt-3.5-turbo-16k
OPENAI_MAX_TOKENS=8096
OPENAI_API_KEY=
LOG_FILE=/app/helix-gpt.log
OPENAI_CONTEXT=
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,9 @@ You can configure helix-gpt by exposing either the environment variables below,

Environment vars
```
OPENAI_MODEL=gpt-3.5-turbo # Optional
OPENAI_MODEL=gpt-3.5-turbo-16k # Optional
OPENAI_API_KEY=123 # required
OPENAI_MAX_TOKENS=7000 # optional
LOG_FILE=/app/debug-helix-gpt.log # Optional
OPENAI_CONTEXT="A terrible code completion assistant" # Optional
```
Expand Down
14 changes: 9 additions & 5 deletions src/app.ts
Original file line number Diff line number Diff line change
Expand Up @@ -67,21 +67,25 @@ const main = async () => {
}

log("calling completion event", contentVersion, "<", lastContentVersion)
const content = await getContent(contents, request.params.position.line, request.params.position.character)
const { lastCharacter, lastLine, templatedContent } = await getContent(contents, request.params.position.line, request.params.position.character)

if (!triggerCharacters.includes(content.slice(-1))) {
log("skipping", content.slice(-1), "not in", triggerCharacters)
if (!triggerCharacters.includes(lastCharacter)) {
log("skipping", lastCharacter, "not in", triggerCharacters)
skip()
return
}

const hints = await getHints(content, language)
const hints = await getHints(templatedContent, language)

log("sending completion", JSON.stringify({
content, hints
templatedContent, hints
}))

const items = hints?.map((i) => {
if (i.startsWith(lastLine.trim())) {
i = i.slice(lastLine.trim().length)
}

const lines = i.split('\n')
const cleanLine = request.params.position.line + lines.length - 1
let cleanCharacter = lines.slice(-1)[0].length
Expand Down
6 changes: 5 additions & 1 deletion src/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,11 @@ const { values } = parseArgs({
},
openaiModel: {
type: 'string',
default: Bun.env.OPENAI_MODEL ?? "gpt-3.5-turbo"
default: Bun.env.OPENAI_MODEL ?? "gpt-3.5-turbo-16k"
},
openaiMaxTokens: {
type: 'string',
default: Bun.env.OPENAI_MAX_TOKENS ?? "7000"
},
logFile: {
type: 'string',
Expand Down
30 changes: 6 additions & 24 deletions src/constants.ts
Original file line number Diff line number Diff line change
@@ -1,34 +1,16 @@
export const context = `As a language model optimized for code completion in the language <languageId>, follow these principles:
- Analyze the given code snippet to identify what type of construct it represents (e.g., function, loop, conditional statement).
- Discern any identifiable coding patterns or conventions in the provided snippet to maintain consistency in style and structure.
- Complete only the immediate section of code that is being worked on without expanding beyond its scope.
- Avoid adding comments or annotations within your response since they are not requested.
- Refrain from repeating any part of the original request's code in your output; focus solely on generating new content that logically and syntactically follows from it.`

export const context = `Continue the input code from the language <languageId> at the <BEGIN_COMPLETION> marker. Only respond with code.`
export const examples = [
{
role: "user",
content: "import example from \"./model\"\n\nconst testing = 123\nfunction randomString(",
content: `function randomInt(<BEGIN_COMPLETION>`
},
{
role: "assistant",
content: `length: number): string {
let result = '';
const characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';
for (let i = 0; i < length; i++) {
result += characters.charAt(Math.floor(Math.random() * characters.length));
}
return result;
content: `min: number, max: number): number {
min = Math.ceil(min);
max = Math.floor(max);
return Math.floor(Math.random() * (max - min + 1)) + min;
}`
},
{
role: "user",
content: "import test from \"util\"\n\nconst alphabet ",
},
{
role: "assistant",
content: " = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ';"
}
]

10 changes: 6 additions & 4 deletions src/openai.ts
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,12 @@ export const getHints = async (contents: string, languageId: string, suggestions

const body = {
"model": config.openaiModel,
"max_tokens": 2048,
"max_tokens": parseInt(config.openaiMaxTokens as string),
"n": suggestions,
"temperature": 0.7,
"top_p": 1,
"frequency_penalty": 0,
"temperature": 1,
"top_p": 0.7,
"frequency_penalty": 1,
presence_penalty: 2,
messages
}

Expand All @@ -37,6 +38,7 @@ export const getHints = async (contents: string, languageId: string, suggestions
})

if (!response.ok) {
log("openai error", response.status, await response.text())
throw new Error("openai request failed with code: " + response.status)
}

Expand Down
8 changes: 7 additions & 1 deletion src/utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,13 @@ import config from "./config"
// Splits `contents` at the cursor position (line, column) and builds the
// completion-prompt template for the model.
//
// Returns:
//   contentBefore    - all lines up to the cursor line, with the cursor line
//                      truncated at the cursor column
//   contentAfter     - every line below the cursor line
//   lastCharacter    - the single character immediately before the cursor
//                      ('' when the cursor is at the start of the file)
//   lastLine         - the truncated cursor line
//   templatedContent - contentBefore + "<BEGIN_COMPLETION>\n" + contentAfter,
//                      i.e. the whole file with a marker at the cursor
export const getContent = async (contents: string, line: number, column: number) => {
  const lines = contents.split('\n').slice(0, line + 1)
  // Truncate the cursor line at the cursor column (slice operates on UTF-16
  // code units, matching the editor's column semantics here).
  lines[lines.length - 1] = lines[lines.length - 1].slice(0, column)
  const lastLine = lines[lines.length - 1]
  const contentBefore = lines.join('\n')
  const contentAfter = contents.split('\n').slice(line + 1).join('\n')
  const lastCharacter = contentBefore.slice(-1)
  const templatedContent = `${contentBefore}<BEGIN_COMPLETION>\n${contentAfter}`

  return { contentBefore, contentAfter, lastCharacter, templatedContent, lastLine }
}

export const log = (...args: any) => {
Expand Down

0 comments on commit c540996

Please sign in to comment.