simplified new role syntax

madox2 committed Dec 17, 2024
1 parent 8fde389 commit 44625c9
Showing 6 changed files with 66 additions and 66 deletions.
36 changes: 21 additions & 15 deletions README.md
@@ -196,14 +196,14 @@ let g:vim_ai_roles_config_file = '/path/to/my/roles.ini'
 
 [grammar]
 prompt = fix spelling and grammar
-config.options.temperature = 0.4
+options.temperature = 0.4
 
 [o1-mini]
-config.options.stream = 0
-config.options.model = o1-mini
-config.options.max_completion_tokens = 25000
-config.options.temperature = 1
-config.options.initial_prompt =
+options.stream = 0
+options.model = o1-mini
+options.max_completion_tokens = 25000
+options.temperature = 1
+options.initial_prompt =
 ```
 
 Now you can select text and run it with command `:AIEdit /grammar`.
@@ -290,6 +290,7 @@ If you answer in a code, do not wrap it in markdown code block.
 END
 " :AI
+" - prompt: optional prepended prompt
 " - engine: chat | complete - see how to configure complete engine in the section below
 " - options: openai config (see https://platform.openai.com/docs/api-reference/completions)
 " - options.initial_prompt: prompt prepended to every chat request (list of lines or string)
@@ -299,6 +300,7 @@ END
 " - options.selection_boundary: selection prompt wrapper (eliminates empty responses, see #20)
 " - ui.paste_mode: use paste mode (see more info in the Notes below)
 let g:vim_ai_complete = {
+\ "prompt": "",
 \ "engine": "chat",
 \ "options": {
 \ "model": "gpt-4o",
@@ -319,6 +321,7 @@ let g:vim_ai_complete = {
 \}
 " :AIEdit
+" - prompt: optional prepended prompt
 " - engine: chat | complete - see how to configure complete engine in the section below
 " - options: openai config (see https://platform.openai.com/docs/api-reference/completions)
 " - options.initial_prompt: prompt prepended to every chat request (list of lines or string)
@@ -328,6 +331,7 @@ let g:vim_ai_complete = {
 " - options.selection_boundary: selection prompt wrapper (eliminates empty responses, see #20)
 " - ui.paste_mode: use paste mode (see more info in the Notes below)
 let g:vim_ai_edit = {
+\ "prompt": "",
 \ "engine": "chat",
 \ "options": {
 \ "model": "gpt-4o",
@@ -356,6 +360,7 @@ If you attach a code block add syntax type after ``` to enable syntax highlighti
 END
 " :AIChat
+" - prompt: optional prepended prompt
 " - options: openai config (see https://platform.openai.com/docs/api-reference/chat)
 " - options.initial_prompt: prompt prepended to every chat request (list of lines or string)
 " - options.request_timeout: request timeout in seconds
@@ -367,6 +372,7 @@ END
 " - ui.scratch_buffer_keep_open: re-use scratch buffer within the vim session
 " - ui.paste_mode: use paste mode (see more info in the Notes below)
 let g:vim_ai_chat = {
+\ "prompt": "",
 \ "options": {
 \ "model": "gpt-4o",
 \ "endpoint_url": "https://api.openai.com/v1/chat/completions",
@@ -423,19 +429,19 @@ Then you set up a custom role that points to the OpenRouter endpoint:
 
 ```ini
 [gemini]
-config.options.token_file_path = ~/.config/vim-ai-openrouter.token
-config.options.endpoint_url = https://openrouter.ai/api/v1/chat/completions
-config.options.model = google/gemini-exp-1121:free
+options.token_file_path = ~/.config/vim-ai-openrouter.token
+options.endpoint_url = https://openrouter.ai/api/v1/chat/completions
+options.model = google/gemini-exp-1121:free
 
 [llama]
-config.options.token_file_path = ~/.config/vim-ai-openrouter.token
-config.options.endpoint_url = https://openrouter.ai/api/v1/chat/completions
-config.options.model = meta-llama/llama-3.3-70b-instruct
+options.token_file_path = ~/.config/vim-ai-openrouter.token
+options.endpoint_url = https://openrouter.ai/api/v1/chat/completions
+options.model = meta-llama/llama-3.3-70b-instruct
 
 [claude]
-config.options.token_file_path = ~/.config/vim-ai-openrouter.token
-config.options.endpoint_url = https://openrouter.ai/api/v1/chat/completions
-config.options.model = anthropic/claude-3.5-haiku
+options.token_file_path = ~/.config/vim-ai-openrouter.token
+options.endpoint_url = https://openrouter.ai/api/v1/chat/completions
+options.model = anthropic/claude-3.5-haiku
 ```
 
 Now you can use the role:
3 changes: 3 additions & 0 deletions autoload/vim_ai_config.vim
@@ -9,6 +9,7 @@ Do not provide any explanantion or comments if not requested.
 If you answer in a code, do not wrap it in markdown code block.
 END
 let g:vim_ai_complete_default = {
+\ "prompt": "",
 \ "engine": "chat",
 \ "options": {
 \ "model": "gpt-4o",
@@ -28,6 +29,7 @@ let g:vim_ai_complete_default = {
 \ },
 \}
 let g:vim_ai_edit_default = {
+\ "prompt": "",
 \ "engine": "chat",
 \ "options": {
 \ "model": "gpt-4o",
@@ -54,6 +56,7 @@ You are a general assistant.
 If you attach a code block add syntax type after ``` to enable syntax highlighting.
 END
 let g:vim_ai_chat_default = {
+\ "prompt": "",
 \ "options": {
 \ "model": "gpt-4o",
 \ "endpoint_url": "https://api.openai.com/v1/chat/completions",
2 changes: 1 addition & 1 deletion doc/vim-ai.txt
@@ -198,7 +198,7 @@ Example of a role: >
 
 [grammar]
 prompt = fix spelling and grammar
-config.options.temperature = 0.4
+options.temperature = 0.4
 
 Now you can select text and run it with command `:AIEdit /grammar`.
 See roles-example.ini for more examples.
53 changes: 22 additions & 31 deletions py/context.py
@@ -39,31 +39,23 @@ def load_roles_with_deprecated_syntax(roles, role):
     return {
         'role_default': {
             'prompt': prompt,
-            'config': {
-                'options': dict(roles.get(f"{role}.options", {})),
-                'ui': dict(roles.get(f"{role}.ui", {})),
-            },
+            'options': dict(roles.get(f"{role}.options", {})),
+            'ui': dict(roles.get(f"{role}.ui", {})),
         },
         'role_complete': {
             'prompt': prompt,
-            'config': {
-                'options': dict(roles.get(f"{role}.options-complete", {})),
-                'ui': dict(roles.get(f"{role}.ui-complete", {})),
-            },
+            'options': dict(roles.get(f"{role}.options-complete", {})),
+            'ui': dict(roles.get(f"{role}.ui-complete", {})),
         },
         'role_edit': {
             'prompt': prompt,
-            'config': {
-                'options': dict(roles.get(f"{role}.options-edit", {})),
-                'ui': dict(roles.get(f"{role}.ui-edit", {})),
-            },
+            'options': dict(roles.get(f"{role}.options-edit", {})),
+            'ui': dict(roles.get(f"{role}.ui-edit", {})),
         },
         'role_chat': {
             'prompt': prompt,
-            'config': {
-                'options': dict(roles.get(f"{role}.options-chat", {})),
-                'ui': dict(roles.get(f"{role}.ui-chat", {})),
-            },
+            'options': dict(roles.get(f"{role}.options-chat", {})),
+            'ui': dict(roles.get(f"{role}.ui-chat", {})),
         },
     }
 
@@ -120,20 +112,18 @@ def parse_prompt_and_role_config(user_instruction, command_type):
     roles = parse_role_names(user_instruction)
     if not roles:
         # does not require role
-        return (user_instruction, '', {})
+        return (user_instruction, {})
 
     last_role = roles[-1]
     user_prompt = user_instruction[user_instruction.index(last_role) + len(last_role):].strip() # strip roles
 
     parsed_role = merge_deep([load_role_config(role) for role in roles])
-    role_default = parsed_role['role_default']
-    role_command = parsed_role['role_' + command_type]
-    config = merge_deep([role_default.get('config', {}), role_command.get('config', {})])
-    role_prompt = role_default.get('prompt') or role_command.get('prompt', '')
-    return user_prompt, role_prompt, config
-
-def make_selection_prompt(user_selection, user_prompt, role_prompt, selection_boundary):
-    if not user_prompt and not role_prompt:
+    config = merge_deep([parsed_role['role_default'], parsed_role['role_' + command_type]])
+    role_prompt = config.get('prompt', '')
+    return user_prompt, config
+
+def make_selection_prompt(user_selection, user_prompt, config_prompt, selection_boundary):
+    if not user_prompt and not config_prompt:
         return user_selection
     elif user_selection:
         if selection_boundary and selection_boundary not in user_selection:
@@ -142,15 +132,15 @@ def make_selection_prompt(user_selection, user_prompt, role_prompt, selection_bo
         return user_selection
     return ''
 
-def make_prompt(role_prompt, user_prompt, user_selection, selection_boundary):
+def make_prompt(config_prompt, user_prompt, user_selection, selection_boundary):
     user_prompt = user_prompt.strip()
     delimiter = ":\n" if user_prompt and user_selection else ""
-    user_selection = make_selection_prompt(user_selection, user_prompt, role_prompt, selection_boundary)
+    user_selection = make_selection_prompt(user_selection, user_prompt, config_prompt, selection_boundary)
     prompt = f"{user_prompt}{delimiter}{user_selection}"
-    if not role_prompt:
+    if not config_prompt:
         return prompt
     delimiter = '' if prompt.startswith(':') else ':\n'
-    prompt = f"{role_prompt}{delimiter}{prompt}"
+    prompt = f"{config_prompt}{delimiter}{prompt}"
     return prompt
 
 def make_ai_context(params):
@@ -160,10 +150,11 @@ def make_ai_context(params):
     user_selection = params['user_selection']
     command_type = params['command_type']
 
-    user_prompt, role_prompt, role_config = parse_prompt_and_role_config(user_instruction, command_type)
+    user_prompt, role_config = parse_prompt_and_role_config(user_instruction, command_type)
     final_config = merge_deep([config_default, config_extension, role_config])
     selection_boundary = final_config['options']['selection_boundary']
-    prompt = make_prompt(role_prompt, user_prompt, user_selection, selection_boundary)
+    config_prompt = final_config.get('prompt', '')
+    prompt = make_prompt(config_prompt, user_prompt, user_selection, selection_boundary)
 
     return {
         'config': final_config,
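The refactored py/context.py above leans on `merge_deep` to collapse the default, extension, and role configs into one dict. That helper is not part of this diff; the sketch below is an assumption of how such a recursive merge could behave, not the plugin's actual implementation.

```python
def merge_deep(objects):
    """Merge a list of dicts left to right; nested dicts merge recursively,
    scalar values from later dicts overwrite earlier ones."""
    result = {}
    for obj in objects:
        for key, value in obj.items():
            if isinstance(value, dict) and isinstance(result.get(key), dict):
                result[key] = merge_deep([result[key], value])
            else:
                result[key] = value
    return result

# e.g. the [grammar] role from README.md layered over a default config
# (default values here are illustrative only):
merged = merge_deep([
    {'prompt': '', 'options': {'model': 'gpt-4o', 'temperature': 0.1}},
    {'prompt': 'fix spelling and grammar', 'options': {'temperature': 0.4}},
])
assert merged == {'prompt': 'fix spelling and grammar',
                  'options': {'model': 'gpt-4o', 'temperature': 0.4}}
```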
18 changes: 9 additions & 9 deletions roles-example.ini
@@ -14,18 +14,18 @@ prompt =
     please refactor it in a more clean and concise way so that my colleagues
     can maintain the code more easily. Also, explain why you want to refactor
     the code so that I can add the explanation to the Pull Request.
-config.options.temperature = 0.4
+options.temperature = 0.4
 # command specific options:
 [refactor.chat]
-config.options.model = gpt-4o
+options.model = gpt-4o
 [refactor.complete]
-config.options.model = gpt-4
+options.model = gpt-4
 [refactor.edit]
-config.options.model = gpt-4
+options.model = gpt-4
 
 [o1-mini]
-config.options.stream = 0
-config.options.model = o1-mini
-config.options.max_completion_tokens = 25000
-config.options.temperature = 1
-config.options.initial_prompt =
+options.stream = 0
+options.model = o1-mini
+options.max_completion_tokens = 25000
+options.temperature = 1
+options.initial_prompt =
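For a concrete picture of the simplified syntax: dropping the `config.` prefix means a role section's dotted keys map directly onto the nested config structure. Below is a toy sketch of such an expansion; it is an illustrative assumption, not the plugin's actual role parser.

```python
import configparser

def expand_role_section(section):
    """Expand dotted keys like 'options.temperature' into nested dicts."""
    config = {}
    for key, value in section.items():
        target = config
        parts = key.split('.')
        for part in parts[:-1]:
            # descend into (or create) the intermediate dict
            target = target.setdefault(part, {})
        target[parts[-1]] = value
    return config

parser = configparser.ConfigParser()
parser.read_string("""
[o1-mini]
options.stream = 0
options.model = o1-mini
""")

print(expand_role_section(parser['o1-mini']))
# -> {'options': {'stream': '0', 'model': 'o1-mini'}}
```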
20 changes: 10 additions & 10 deletions tests/resources/roles.ini
@@ -1,22 +1,22 @@
 [test-role-simple]
 prompt = simple role prompt
-config.options.model = o1-preview
+options.model = o1-preview
 
 [test-role]
-config.options.model = model-common
-config.ui.paste_mode = 0
+options.model = model-common
+ui.paste_mode = 0
 [test-role.chat]
-config.options.endpoint_url = https://localhost/chat
-config.ui.open_chat_command = preset_tab
+options.endpoint_url = https://localhost/chat
+ui.open_chat_command = preset_tab
 [test-role.complete]
-config.engine = complete
-config.options.endpoint_url = https://localhost/complete
+engine = complete
+options.endpoint_url = https://localhost/complete
 [test-role.edit]
-config.engine = complete
-config.options.endpoint_url = https://localhost/edit
+engine = complete
+options.endpoint_url = https://localhost/edit
 
 [chat-only-role.chat]
-config.options.open_chat_command = preset_tab
+options.open_chat_command = preset_tab
 
 [deprecated-test-role-simple]
 prompt = simple role prompt
