# Model preset configuration (llama.cpp-style INI).
# The [*] section holds global defaults inherited by every model section below;
# per-section keys override these.
[*]
# global defaults; commented lines show previous/alternative settings
#ctx-size = 4096
#threads = 8
mmap = true
# KV-cache quantization: q4_0 minimizes VRAM use; switch back to q8_0
# if generation quality degrades at long context
#cache-type-k = q8_0
#cache-type-v = q8_0
cache-type-k = q4_0
cache-type-v = q4_0
n-gpu-layers = 99
mlock = true
batch-size = 768
ubatch-size = 512

[Master-Of-Experts]
model = /models2/Qwen3.6-35B-A3B-UD-Q4_K_XL.gguf
mmproj = /models2/qwen3.6-mmproj-F16.gguf
ctx-size = 262144
#ctx-size = 131072
temperature = 1.0
top-p = 0.95
top-k = 20
min-p = 0.0
presence-penalty = 1.5
repeat-penalty = 1.0
parallel = 1

[Qwen3.5-9B-Hermes]
model = /models2/Carnice-9b-Q6_K.gguf
mmproj = /models2/mmproj-F16-Qwen3.5-9B.gguf
ctx-size = 262144
temperature = 1.0
top-p = 0.95
top-k = 20
min-p = 0.0
presence-penalty = 1.5
repeat-penalty = 1.0
parallel = 1

[Qwen3.5-9B-DeepSeek-V4-Inventif]
model = /models2/Qwen3.5-9B-DeepSeek-V4-Flash-Q8_0.gguf
mmproj = /models2/Qwen3.5-9B-DeepSeek-V4-Flash-mmproj.gguf
ctx-size = 262144
temperature = 1.0
top-p = 0.95
top-k = 20
min-p = 0.0
presence-penalty = 1.5
repeat-penalty = 1.0
parallel = 1

[Qwen3.5-9B-DeepSeek-V4-Coder]
model = /models2/Qwen3.5-9B-DeepSeek-V4-Flash-Q8_0.gguf
mmproj = /models2/Qwen3.5-9B-DeepSeek-V4-Flash-mmproj.gguf
ctx-size = 262144
temperature = 0.7
top-p = 0.95
top-k = 20
min-p = 0.0
presence-penalty = 1.5
repeat-penalty = 1.0
parallel = 1

[Qwen3.5-9B-General]
model = /models2/Carnice-9b-Q6_K.gguf
mmproj = /models2/mmproj-F16-Qwen3.5-9B.gguf
ctx-size = 262144
temperature = 1.0
top-p = 0.95
top-k = 20
min-p = 0.0
presence-penalty = 1.5
repeat-penalty = 1.0
parallel = 1

[Qwen3.6-35B-A3B-Q4-dualgpu]
model = /models2/Qwen3.6-35B-A3B-UD-Q4_K_XL.gguf
mmproj = /models2/qwen3.6-mmproj-F16.gguf
ctx-size = 262144
#ctx-size = 131072
temperature = 1.0
top-p = 0.95
top-k = 20
min-p = 0.0
presence-penalty = 1.5
repeat-penalty = 1.0
parallel = 1


[Qwen3.5-9B-Code]
model = /models2/Qwen3.5-9B-Q8_0.gguf
mmproj = /models2/mmproj-F16-Qwen3.5-9B.gguf
ctx-size = 131072
temperature = 0.6
top-p = 0.95
top-k = 20
min-p = 0.0
presence-penalty = 0.0
repeat-penalty = 1.0

[Gemma-4-E4B-General]
model = /models2/gemma-4-E4B-it-Q6_K.gguf
#model = /models2/gemma-4-E4B-it-UD-Q8_K_XL.gguf
mmproj = /models2/mmproj-F16-gemma-4-E4B-it.gguf
ctx-size = 131072
temperature = 1.0
top-p = 0.95
top-k = 20
min-p = 0.0
presence-penalty = 1.5
repeat-penalty = 1.0
reasoning = off

# NOTE: this section duplicated the [Qwen3.5-9B-General] header above (same
# model, smaller ctx-size). Renamed to keep section names unique — duplicate
# INI sections are handled inconsistently by parsers (merge / last-wins / error).
[Qwen3.5-9B-General-ctx131k]
model = /models2/Carnice-9b-Q6_K.gguf
mmproj = /models2/mmproj-F16-Qwen3.5-9B.gguf
ctx-size = 131072
temperature = 1.0
top-p = 0.95
top-k = 20
min-p = 0.0
presence-penalty = 1.5
repeat-penalty = 1.0
parallel = 1

[Qwen-3.6-27B]
model = /models2/Qwen3.6-27B-UD-Q3_K_XL.gguf
ctx-size = 131072
temperature = 1.0
top-p = 0.95
top-k = 20
min-p = 0.0
presence-penalty = 1.5
repeat-penalty = 1.0
parallel = 1

[Qwen3.5-9B-General-old]
model = /models2/Qwen3.5-9B-UD-Q6_K_XL.gguf
mmproj = /models2/mmproj-F16-Qwen3.5-9B.gguf
ctx-size = 262144
temperature = 1.0
top-p = 0.95
top-k = 20
min-p = 0.0
presence-penalty = 1.5
repeat-penalty = 1.0
parallel = 2

[Qwen3.5-9B-Instruct-General]
model = /models2/Qwen3.5-9B-Q8_0.gguf
mmproj = /models2/mmproj-F16-Qwen3.5-9B.gguf
reasoning = off
ctx-size = 131072
temperature = 0.7
top-p = 0.8
top-k = 20
min-p = 0.0
presence-penalty = 1.5
repeat-penalty = 1.0

[Qwen3.5-35B-A3B-General]
model = /models2/Qwen3.5-35B-A3B-UD-Q4_K_L.gguf
mmproj = /models2/mmproj-F16-Qwen3.5-35B-A3B.gguf
ctx-size = 65536
batch-size = 2048
ubatch-size = 512
temperature = 1.0
top-p = 0.95
top-k = 20
min-p = 0.0
presence-penalty = 1.5
repeat-penalty = 1.0

[Qwen3.5-35B-A3B-General-Instruct]
model = /models2/Qwen3.5-35B-A3B-UD-Q4_K_L.gguf
mmproj = /models2/mmproj-F16-Qwen3.5-35B-A3B.gguf
reasoning = off
ctx-size = 65536
batch-size = 2048
ubatch-size = 512
temperature = 1.0
top-p = 0.95
top-k = 20
min-p = 0.0
presence-penalty = 1.5
repeat-penalty = 1.0

[Qwen3.5-4B]
model = /models2/Qwen3.5-4B-UD-Q8_K_XL.gguf
mmproj = /models2/mmproj-BF16-Qwen3.5-4B.gguf
ctx-size = 131072

[Nemotron-3-Nano-4B]
model = /models2/NVIDIA-Nemotron-3-Nano-4B-UD-Q8_K_XL.gguf
ctx-size = 131072

[Nemotron-Orchestrator]
model = /models2/Nemotron-Orchestrator-8B.Q6_K.gguf
ctx-size = 40960

[Gemma-4-Moe]
model = /models2/gemma-4-26B-A4B-it-UD-Q4_K_XL.gguf
mmproj = /models2/mmproj-google_gemma-4-26B-A4B-it-bf16.gguf
ctx-size = 131072