-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathexample-config.ini
More file actions
52 lines (40 loc) · 1.73 KB
/
example-config.ini
File metadata and controls
52 lines (40 loc) · 1.73 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
# Copy-paste this file into ~/.coder/config.ini and edit as needed.
[default]
# Select the active provider: "ollama", "gemini", "openai", or "vllm".
active_provider = gemini
# Set to true to enable step-by-step confirmation for sensitive commands in the CLI.
human_in_the_loop = false
[model_features]
# Set to true if you are using a thinking model.
think = false
# Set to true if you are using a vision-capable model. This enables features
# for reading images, videos, etc., and also browser features.
vision = true
# Currently the active provider, as selected by active_provider in [default].
[gemini]
# The Gemini model for the basic agent.
model = gemini-flash-lite-latest
# The model to use for the more complex autonomous agent workflow.
autonomous_model = gemini-flash-lite-latest
# Other supported providers:
[openai]
# Model names from OpenAI (e.g., gpt-4o, gpt-3.5-turbo).
# Your OPENAI_API_KEY must be set as an environment variable.
model = gpt-4o
autonomous_model = gpt-4o
[ollama]
# Your specific Ollama model name for the basic agent.
model = qwencoder-6:latest
# The model to use for the more complex autonomous agent workflow.
autonomous_model = qwen3-30-8:latest
# The host for your Ollama instance (unlike [vllm], no /v1 suffix here).
host = http://localhost:11434
[vllm]
# The host for your vLLM OpenAI-compatible endpoint (e.g., http://localhost:8000/v1).
# Note the trailing /v1 — it's very important here; for Ollama you remove it.
host = http://localhost:8000/v1
# The model name as served by your vLLM instance.
model = meta-llama/Meta-Llama-3-8B-Instruct
autonomous_model = meta-llama/Meta-Llama-3-8B-Instruct
# API key for the vLLM endpoint, if required. Set to "none" if not needed.
# It's recommended to set this as an environment variable instead (e.g., VLLM_API_KEY).
api_key = none