Skip to content

Commit 2d9a26d

Browse files
keith666666 and claude
committed
feat(ollama): add OCO_OLLAMA_THINK config to control thinking mode
Adds support for passing the `think` param to Ollama's /api/chat endpoint, allowing users to disable reasoning blocks on models like qwen3.5 via `oco config set OCO_OLLAMA_THINK=false`. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
1 parent f300b5d commit 2d9a26d

File tree

5 files changed

+88
-4
lines changed

5 files changed

+88
-4
lines changed

src/commands/config.ts

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,8 @@ export enum CONFIG_KEYS {
2929
OCO_API_CUSTOM_HEADERS = 'OCO_API_CUSTOM_HEADERS',
3030
OCO_OMIT_SCOPE = 'OCO_OMIT_SCOPE',
3131
OCO_GITPUSH = 'OCO_GITPUSH', // todo: deprecate
32-
OCO_HOOK_AUTO_UNCOMMENT = 'OCO_HOOK_AUTO_UNCOMMENT'
32+
OCO_HOOK_AUTO_UNCOMMENT = 'OCO_HOOK_AUTO_UNCOMMENT',
33+
OCO_OLLAMA_THINK = 'OCO_OLLAMA_THINK'
3334
}
3435

3536
export enum CONFIG_MODES {
@@ -838,6 +839,15 @@ export const configValidators = {
838839
typeof value === 'boolean',
839840
'Must be true or false'
840841
);
842+
},
843+
844+
[CONFIG_KEYS.OCO_OLLAMA_THINK](value: any) {
845+
validateConfig(
846+
CONFIG_KEYS.OCO_OLLAMA_THINK,
847+
typeof value === 'boolean',
848+
'Must be true or false'
849+
);
850+
return value;
841851
}
842852
};
843853

@@ -905,6 +915,7 @@ export type ConfigType = {
905915
[CONFIG_KEYS.OCO_OMIT_SCOPE]: boolean;
906916
[CONFIG_KEYS.OCO_TEST_MOCK_TYPE]: string;
907917
[CONFIG_KEYS.OCO_HOOK_AUTO_UNCOMMENT]: boolean;
918+
[CONFIG_KEYS.OCO_OLLAMA_THINK]?: boolean;
908919
};
909920

910921
export const defaultConfigPath = pathJoin(homedir(), '.opencommit');

src/engine/Engine.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ export interface AiEngineConfig {
1313
baseURL?: string;
1414
proxy?: string;
1515
customHeaders?: Record<string, string>;
16+
ollamaThink?: boolean;
1617
}
1718

1819
type Client =

src/engine/ollama.ts

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,9 @@ import { normalizeEngineError } from '../utils/engineErrorHandler';
44
import { removeContentTags } from '../utils/removeContentTags';
55
import { AiEngine, AiEngineConfig } from './Engine';
66

7-
interface OllamaConfig extends AiEngineConfig {}
7+
interface OllamaConfig extends AiEngineConfig {
8+
ollamaThink?: boolean;
9+
}
810

911
const DEFAULT_OLLAMA_URL = 'http://localhost:11434';
1012
const OLLAMA_CHAT_PATH = '/api/chat';
@@ -32,12 +34,15 @@ export class OllamaEngine implements AiEngine {
3234
async generateCommitMessage(
3335
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
3436
): Promise<string | undefined> {
35-
const params = {
37+
const params: Record<string, any> = {
3638
model: this.config.model ?? 'mistral',
3739
messages,
3840
options: { temperature: 0, top_p: 0.1 },
3941
stream: false
4042
};
43+
if (typeof this.config.ollamaThink === 'boolean') {
44+
params.think = this.config.ollamaThink;
45+
}
4146
try {
4247
const response = await this.client.post(this.chatUrl, params);
4348

src/utils/engine.ts

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,10 @@ export function getEngine(): AiEngine {
5454

5555
switch (provider) {
5656
case OCO_AI_PROVIDER_ENUM.OLLAMA:
57-
return new OllamaEngine(DEFAULT_CONFIG);
57+
return new OllamaEngine({
58+
...DEFAULT_CONFIG,
59+
ollamaThink: config.OCO_OLLAMA_THINK
60+
});
5861

5962
case OCO_AI_PROVIDER_ENUM.ANTHROPIC:
6063
return new AnthropicEngine(DEFAULT_CONFIG);

test/unit/ollama.test.ts

Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,64 @@
1+
import { OllamaEngine } from '../../src/engine/ollama';
2+
3+
describe('OllamaEngine', () => {
4+
it('sends think=false when configured', async () => {
5+
const engine = new OllamaEngine({
6+
apiKey: 'ollama',
7+
model: 'qwen3.5:2b',
8+
maxTokensOutput: 500,
9+
maxTokensInput: 4096,
10+
ollamaThink: false
11+
});
12+
13+
const post = jest.fn().mockResolvedValue({
14+
data: {
15+
message: {
16+
content: 'feat: add support for ollama think config'
17+
}
18+
}
19+
});
20+
21+
engine.client = { post } as any;
22+
23+
await engine.generateCommitMessage([
24+
{ role: 'user', content: 'diff --git a/file b/file' }
25+
]);
26+
27+
expect(post).toHaveBeenCalledWith(
28+
'http://localhost:11434/api/chat',
29+
expect.objectContaining({
30+
think: false
31+
})
32+
);
33+
});
34+
35+
it('omits think when not configured', async () => {
36+
const engine = new OllamaEngine({
37+
apiKey: 'ollama',
38+
model: 'qwen3.5:2b',
39+
maxTokensOutput: 500,
40+
maxTokensInput: 4096
41+
});
42+
43+
const post = jest.fn().mockResolvedValue({
44+
data: {
45+
message: {
46+
content: 'feat: add support for ollama think config'
47+
}
48+
}
49+
});
50+
51+
engine.client = { post } as any;
52+
53+
await engine.generateCommitMessage([
54+
{ role: 'user', content: 'diff --git a/file b/file' }
55+
]);
56+
57+
expect(post).toHaveBeenCalledWith(
58+
'http://localhost:11434/api/chat',
59+
expect.not.objectContaining({
60+
think: expect.anything()
61+
})
62+
);
63+
});
64+
});

0 commit comments

Comments (0)