Compare commits

...

3 Commits

Author SHA1 Message Date
Thomas Kosiewski 0f5e96e4df fix(site): uppercase API in model config labels 2026-03-11 19:25:29 +00:00
Thomas Kosiewski cc7876e620 test(terraform): update duplicate env golden 2026-03-11 18:43:17 +00:00
Thomas Kosiewski 033f9583b6 feat(chatd): add OpenAI api mode override 2026-03-11 18:37:10 +00:00
10 changed files with 458 additions and 46 deletions
+27 -1
View File
@@ -46,6 +46,11 @@ var providerDisplayNameByName = map[string]string{
fantasyvercel.Name: "Vercel AI Gateway",
}
// Admin-supplied values accepted for ChatModelOpenAIProviderOptions.APIMode.
// "responses" forces the OpenAI Responses API; "chat_completions" forces the
// Chat Completions API. Any other value falls back to the model heuristic.
const (
	openAIAPIModeResponses       = "responses"
	openAIAPIModeChatCompletions = "chat_completions"
)
// SupportedProviders returns all chat providers supported by Fantasy.
func SupportedProviders() []string {
return append([]string(nil), supportedProviderNames...)
@@ -638,6 +643,9 @@ func MergeMissingProviderOptions(
if dstOpenAI.User == nil {
dstOpenAI.User = defaultOpenAI.User
}
if dstOpenAI.APIMode == nil {
dstOpenAI.APIMode = defaultOpenAI.APIMode
}
if dstOpenAI.ReasoningEffort == nil {
dstOpenAI.ReasoningEffort = defaultOpenAI.ReasoningEffort
}
@@ -1050,7 +1058,7 @@ func openAIProviderOptionsFromChatConfig(
options *codersdk.ChatModelOpenAIProviderOptions,
) fantasy.ProviderOptionsData {
reasoningEffort := openAIReasoningEffortFromChat(options.ReasoningEffort)
if useOpenAIResponsesOptions(model) {
if openAIProviderUsesResponsesAPI(model, options) {
include := ensureOpenAIResponseIncludes(openAIIncludeFromChat(options.Include))
providerOptions := &fantasyopenai.ResponsesProviderOptions{
Include: include,
@@ -1250,6 +1258,24 @@ func ensureOpenAIResponseIncludes(
return append(values, required)
}
// openAIProviderUsesResponsesAPI reports whether the OpenAI Responses API
// should be used for this model. An explicit APIMode override from the admin
// config wins; when the override is absent or unrecognized, the existing
// model heuristic decides, so stale allowlist decisions can be corrected
// without a code change.
func openAIProviderUsesResponsesAPI(
	model fantasy.LanguageModel,
	options *codersdk.ChatModelOpenAIProviderOptions,
) bool {
	if options == nil || options.APIMode == nil {
		return useOpenAIResponsesOptions(model)
	}
	mode := strings.ToLower(strings.TrimSpace(*options.APIMode))
	if mode == openAIAPIModeResponses {
		return true
	}
	if mode == openAIAPIModeChatCompletions {
		return false
	}
	// Unknown override value: defer to the heuristic rather than fail.
	return useOpenAIResponsesOptions(model)
}
func useOpenAIResponsesOptions(model fantasy.LanguageModel) bool {
if model == nil {
return false
@@ -1,8 +1,10 @@
package chatprovider_test
import (
"context"
"testing"
"charm.land/fantasy"
fantasyanthropic "charm.land/fantasy/providers/anthropic"
fantasyopenai "charm.land/fantasy/providers/openai"
fantasyopenrouter "charm.land/fantasy/providers/openrouter"
@@ -77,6 +79,75 @@ func TestReasoningEffortFromChat(t *testing.T) {
}
}
// TestProviderOptionsFromChatModelConfig_OpenAIAPIMode verifies that the
// api_mode override forces the Responses or Chat Completions option type,
// and that leaving it unset falls back to the per-model heuristic.
func TestProviderOptionsFromChatModelConfig_OpenAIAPIMode(t *testing.T) {
	t.Parallel()

	cases := []struct {
		name          string
		provider      string
		model         string
		apiMode       *string
		wantResponses bool
	}{
		{
			name:          "UnknownModelUsesChatCompletionsHeuristicByDefault",
			provider:      "openai",
			model:         "gpt-5.4",
			wantResponses: false,
		},
		{
			name:          "UnknownModelCanForceResponses",
			provider:      "openai",
			model:         "gpt-5.4",
			apiMode:       stringPtr("responses"),
			wantResponses: true,
		},
		{
			name:          "KnownResponsesModelUsesResponsesHeuristicByDefault",
			provider:      "openai",
			model:         "gpt-5.2",
			wantResponses: true,
		},
		{
			name:          "KnownResponsesModelCanForceChatCompletions",
			provider:      "openai",
			model:         "gpt-5.2",
			apiMode:       stringPtr("chat_completions"),
			wantResponses: false,
		},
	}

	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			opts := chatprovider.ProviderOptionsFromChatModelConfig(
				fakeLanguageModel{provider: tc.provider, model: tc.model},
				&codersdk.ChatModelProviderOptions{
					OpenAI: &codersdk.ChatModelOpenAIProviderOptions{
						APIMode: tc.apiMode,
					},
				},
			)
			require.NotNil(t, opts)

			openAIOptions, ok := opts[fantasyopenai.Name]
			require.True(t, ok)
			require.NotNil(t, openAIOptions)

			if tc.wantResponses {
				require.IsType(t, &fantasyopenai.ResponsesProviderOptions{}, openAIOptions)
			} else {
				require.IsType(t, &fantasyopenai.ProviderOptions{}, openAIOptions)
			}
		})
	}
}
func TestMergeMissingProviderOptions_OpenRouterNested(t *testing.T) {
t.Parallel()
@@ -155,6 +226,7 @@ func TestMergeMissingCallConfig_FillsUnsetFields(t *testing.T) {
ProviderOptions: &codersdk.ChatModelProviderOptions{
OpenAI: &codersdk.ChatModelOpenAIProviderOptions{
User: stringPtr("bob"),
APIMode: stringPtr("responses"),
ReasoningEffort: stringPtr("medium"),
},
},
@@ -171,9 +243,39 @@ func TestMergeMissingCallConfig_FillsUnsetFields(t *testing.T) {
require.NotNil(t, dst.ProviderOptions)
require.NotNil(t, dst.ProviderOptions.OpenAI)
require.Equal(t, "alice", *dst.ProviderOptions.OpenAI.User)
require.Equal(t, "responses", *dst.ProviderOptions.OpenAI.APIMode)
require.Equal(t, "medium", *dst.ProviderOptions.OpenAI.ReasoningEffort)
}
// fakeLanguageModel is a minimal test double for fantasy.LanguageModel.
// Only Provider and Model return data; the generation methods panic so a
// test fails loudly if the code under test unexpectedly invokes the model.
type fakeLanguageModel struct {
	provider string
	model    string
}

// Generate must never be called while building provider options.
func (fakeLanguageModel) Generate(context.Context, fantasy.Call) (*fantasy.Response, error) {
	panic("unexpected Generate call")
}

// Stream must never be called while building provider options.
func (fakeLanguageModel) Stream(context.Context, fantasy.Call) (fantasy.StreamResponse, error) {
	panic("unexpected Stream call")
}

// GenerateObject must never be called while building provider options.
func (fakeLanguageModel) GenerateObject(context.Context, fantasy.ObjectCall) (*fantasy.ObjectResponse, error) {
	panic("unexpected GenerateObject call")
}

// StreamObject must never be called while building provider options.
func (fakeLanguageModel) StreamObject(context.Context, fantasy.ObjectCall) (fantasy.ObjectStreamResponse, error) {
	panic("unexpected StreamObject call")
}

// Provider returns the configured provider name.
func (f fakeLanguageModel) Provider() string {
	return f.provider
}

// Model returns the configured model name.
func (f fakeLanguageModel) Model() string {
	return f.model
}
// stringPtr returns a pointer to a copy of s, for populating optional
// (*string) fields in test fixtures.
func stringPtr(s string) *string {
	return &s
}
+1
View File
@@ -301,6 +301,7 @@ type ChatModelOpenAIProviderOptions struct {
MaxToolCalls *int64 `json:"max_tool_calls,omitempty" description:"Maximum number of tool calls per response"`
ParallelToolCalls *bool `json:"parallel_tool_calls,omitempty" description:"Whether the model may make multiple tool calls in parallel"`
User *string `json:"user,omitempty" description:"Unique identifier for the end user for abuse monitoring" hidden:"true"`
APIMode *string `json:"api_mode,omitempty" description:"Force the OpenAI API for this model. Leave unset to use the backend heuristic." enum:"responses,chat_completions"`
ReasoningEffort *string `json:"reasoning_effort,omitempty" description:"Controls the level of reasoning effort" enum:"none,minimal,low,medium,high,xhigh"`
ReasoningSummary *string `json:"reasoning_summary,omitempty" description:"Controls whether reasoning tokens are summarized in the response"`
MaxCompletionTokens *int64 `json:"max_completion_tokens,omitempty" description:"Upper bound on tokens the model may generate"`
+6 -5
View File
@@ -128,11 +128,12 @@ fields appear dynamically in the admin UI when you select a provider.
#### OpenAI
| Option | Description |
|-----------------------|-----------------------------------------------------------------------|
| Reasoning Effort | How much effort the model spends reasoning (`low`, `medium`, `high`). |
| Max Completion Tokens | Cap on completion tokens for reasoning models. |
| Parallel Tool Calls | Whether the model can call multiple tools at once. |
| Option | Description |
|-----------------------|--------------------------------------------------------------------------------------------------------|
| API Mode | Force `responses` or `chat_completions`, or leave it unset to use Coder's automatic routing heuristic. |
| Reasoning Effort | How much effort the model spends reasoning (`low`, `medium`, `high`). |
| Max Completion Tokens | Cap on completion tokens for reasoning models. |
| Parallel Tool Calls | Whether the model can call multiple tools at once. |
#### Google
@@ -5,12 +5,11 @@
"type": "null_resource",
"agents": [
{
"id": "aaaaaaaa-1111-2222-3333-444444444444",
"name": "dev",
"operating_system": "linux",
"architecture": "amd64",
"Auth": {
"Token": "11111111-2222-3333-4444-555555555555"
"Token": ""
},
"connection_timeout_seconds": 120,
"display_apps": {
@@ -17,18 +17,7 @@
"auth": "token",
"connection_timeout": 120,
"dir": null,
"display_apps": [
{
"port_forwarding_helper": true,
"ssh_helper": true,
"vscode": true,
"vscode_insiders": false,
"web_terminal": true
}
],
"env": null,
"id": "aaaaaaaa-1111-2222-3333-444444444444",
"init_script": "",
"metadata": [],
"motd_file": null,
"order": null,
@@ -37,13 +26,10 @@
"shutdown_script": null,
"startup_script": null,
"startup_script_behavior": "non-blocking",
"token": "11111111-2222-3333-4444-555555555555",
"troubleshooting_url": null
},
"sensitive_values": {
"display_apps": [
{}
],
"display_apps": [],
"metadata": [],
"resources_monitoring": [],
"token": true
@@ -57,15 +43,10 @@
"provider_name": "registry.terraform.io/coder/coder",
"schema_version": 1,
"values": {
"agent_id": "aaaaaaaa-1111-2222-3333-444444444444",
"id": "bbbbbbbb-1111-2222-3333-444444444444",
"name": "PATH",
"value": "/a/bin"
},
"sensitive_values": {},
"depends_on": [
"coder_agent.dev"
]
"sensitive_values": {}
},
{
"address": "coder_env.path_b",
@@ -75,15 +56,10 @@
"provider_name": "registry.terraform.io/coder/coder",
"schema_version": 1,
"values": {
"agent_id": "aaaaaaaa-1111-2222-3333-444444444444",
"id": "cccccccc-1111-2222-3333-444444444444",
"name": "PATH",
"value": "/b/bin"
},
"sensitive_values": {},
"depends_on": [
"coder_agent.dev"
]
"sensitive_values": {}
},
{
"address": "coder_env.unique_env",
@@ -93,15 +69,10 @@
"provider_name": "registry.terraform.io/coder/coder",
"schema_version": 1,
"values": {
"agent_id": "aaaaaaaa-1111-2222-3333-444444444444",
"id": "dddddddd-1111-2222-3333-444444444444",
"name": "UNIQUE",
"value": "unique_value"
},
"sensitive_values": {},
"depends_on": [
"coder_agent.dev"
]
"sensitive_values": {}
},
{
"address": "null_resource.dev",
@@ -111,15 +82,270 @@
"provider_name": "registry.terraform.io/hashicorp/null",
"schema_version": 0,
"values": {
"id": "1234567890123456789",
"triggers": null
},
"sensitive_values": {},
"sensitive_values": {}
}
]
}
},
"resource_changes": [
{
"address": "coder_agent.dev",
"mode": "managed",
"type": "coder_agent",
"name": "dev",
"provider_name": "registry.terraform.io/coder/coder",
"change": {
"actions": [
"create"
],
"before": null,
"after": {
"api_key_scope": "all",
"arch": "amd64",
"auth": "token",
"connection_timeout": 120,
"dir": null,
"env": null,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"resources_monitoring": [],
"shutdown_script": null,
"startup_script": null,
"startup_script_behavior": "non-blocking",
"troubleshooting_url": null
},
"after_unknown": {
"display_apps": true,
"id": true,
"init_script": true,
"metadata": [],
"resources_monitoring": [],
"token": true
},
"before_sensitive": false,
"after_sensitive": {
"display_apps": [],
"metadata": [],
"resources_monitoring": [],
"token": true
}
}
},
{
"address": "coder_env.path_a",
"mode": "managed",
"type": "coder_env",
"name": "path_a",
"provider_name": "registry.terraform.io/coder/coder",
"change": {
"actions": [
"create"
],
"before": null,
"after": {
"name": "PATH",
"value": "/a/bin"
},
"after_unknown": {
"agent_id": true,
"id": true
},
"before_sensitive": false,
"after_sensitive": {}
}
},
{
"address": "coder_env.path_b",
"mode": "managed",
"type": "coder_env",
"name": "path_b",
"provider_name": "registry.terraform.io/coder/coder",
"change": {
"actions": [
"create"
],
"before": null,
"after": {
"name": "PATH",
"value": "/b/bin"
},
"after_unknown": {
"agent_id": true,
"id": true
},
"before_sensitive": false,
"after_sensitive": {}
}
},
{
"address": "coder_env.unique_env",
"mode": "managed",
"type": "coder_env",
"name": "unique_env",
"provider_name": "registry.terraform.io/coder/coder",
"change": {
"actions": [
"create"
],
"before": null,
"after": {
"name": "UNIQUE",
"value": "unique_value"
},
"after_unknown": {
"agent_id": true,
"id": true
},
"before_sensitive": false,
"after_sensitive": {}
}
},
{
"address": "null_resource.dev",
"mode": "managed",
"type": "null_resource",
"name": "dev",
"provider_name": "registry.terraform.io/hashicorp/null",
"change": {
"actions": [
"create"
],
"before": null,
"after": {
"triggers": null
},
"after_unknown": {
"id": true
},
"before_sensitive": false,
"after_sensitive": {}
}
}
],
"configuration": {
"provider_config": {
"coder": {
"name": "coder",
"full_name": "registry.terraform.io/coder/coder",
"version_constraint": ">= 2.0.0"
},
"null": {
"name": "null",
"full_name": "registry.terraform.io/hashicorp/null"
}
},
"root_module": {
"resources": [
{
"address": "coder_agent.dev",
"mode": "managed",
"type": "coder_agent",
"name": "dev",
"provider_config_key": "coder",
"expressions": {
"arch": {
"constant_value": "amd64"
},
"os": {
"constant_value": "linux"
}
},
"schema_version": 1
},
{
"address": "coder_env.path_a",
"mode": "managed",
"type": "coder_env",
"name": "path_a",
"provider_config_key": "coder",
"expressions": {
"agent_id": {
"references": [
"coder_agent.dev.id",
"coder_agent.dev"
]
},
"name": {
"constant_value": "PATH"
},
"value": {
"constant_value": "/a/bin"
}
},
"schema_version": 1
},
{
"address": "coder_env.path_b",
"mode": "managed",
"type": "coder_env",
"name": "path_b",
"provider_config_key": "coder",
"expressions": {
"agent_id": {
"references": [
"coder_agent.dev.id",
"coder_agent.dev"
]
},
"name": {
"constant_value": "PATH"
},
"value": {
"constant_value": "/b/bin"
}
},
"schema_version": 1
},
{
"address": "coder_env.unique_env",
"mode": "managed",
"type": "coder_env",
"name": "unique_env",
"provider_config_key": "coder",
"expressions": {
"agent_id": {
"references": [
"coder_agent.dev.id",
"coder_agent.dev"
]
},
"name": {
"constant_value": "UNIQUE"
},
"value": {
"constant_value": "unique_value"
}
},
"schema_version": 1
},
{
"address": "null_resource.dev",
"mode": "managed",
"type": "null_resource",
"name": "dev",
"provider_config_key": "null",
"schema_version": 0,
"depends_on": [
"coder_agent.dev"
]
}
]
}
}
},
"relevant_attributes": [
{
"resource": "coder_agent.dev",
"attribute": [
"id"
]
}
],
"timestamp": "2026-03-11T18:35:57Z",
"applyable": true,
"complete": true,
"errored": false
}
@@ -207,6 +207,15 @@
"input_type": "input",
"hidden": true
},
{
"json_name": "api_mode",
"go_name": "APIMode",
"type": "string",
"description": "Force the OpenAI API for this model. Leave unset to use the backend heuristic.",
"required": false,
"enum": ["responses", "chat_completions"],
"input_type": "select"
},
{
"json_name": "reasoning_effort",
"go_name": "ReasoningEffort",
+1
View File
@@ -1341,6 +1341,7 @@ export interface ChatModelOpenAIProviderOptions {
readonly max_tool_calls?: number;
readonly parallel_tool_calls?: boolean;
readonly user?: string;
readonly api_mode?: string;
readonly reasoning_effort?: string;
readonly reasoning_summary?: string;
readonly max_completion_tokens?: number;
@@ -41,7 +41,12 @@ const unsetSelectValue = "__unset__";
// Converts a snake_case (or dotted) JSON field name into a human-readable
// label, e.g. "api_mode" -> "API Mode". The "api" segment is special-cased
// so it renders as the initialism "API" instead of "Api".
function snakeToPrettyLabel(jsonName: string): string {
	const prettyWord = (word: string): string =>
		word === "api" ? "API" : word.charAt(0).toUpperCase() + word.slice(1);
	return jsonName.split(/[._]/).map(prettyWord).join(" ");
}
@@ -211,6 +211,7 @@ describe("extractModelConfigFormState", () => {
model_config: {
provider_options: {
openai: {
api_mode: "responses",
reasoning_effort: "high",
parallel_tool_calls: true,
text_verbosity: "medium",
@@ -223,6 +224,7 @@ describe("extractModelConfigFormState", () => {
};
const result = extractModelConfigFormState(model);
const openai = result.openai as Record<string, unknown>;
expect(openai.apiMode).toBe("responses");
expect(openai.reasoningEffort).toBe("high");
expect(openai.parallelToolCalls).toBe("true");
expect(openai.textVerbosity).toBe("medium");
@@ -523,6 +525,17 @@ describe("buildModelConfigFromForm", () => {
});
});
it("builds OpenAI provider options with api mode", () => {
const result = buildModelConfigFromForm(
"openai",
formWith({ openai: { apiMode: "responses" } }),
);
expect(result.fieldErrors).toEqual({});
expect(result.modelConfig?.provider_options?.openai).toEqual({
api_mode: "responses",
});
});
it("builds Azure provider options (same as OpenAI)", () => {
const result = buildModelConfigFromForm(
"azure",
@@ -534,11 +547,23 @@ describe("buildModelConfigFromForm", () => {
});
});
it("builds Azure api mode under the OpenAI provider options key", () => {
const result = buildModelConfigFromForm(
"azure",
formWith({ openai: { apiMode: "chat_completions" } }),
);
expect(result.fieldErrors).toEqual({});
expect(result.modelConfig?.provider_options?.openai).toEqual({
api_mode: "chat_completions",
});
});
it("builds OpenAI options with all fields set", () => {
const result = buildModelConfigFromForm(
"openai",
formWith({
openai: {
apiMode: "responses",
reasoningEffort: "medium",
parallelToolCalls: "false",
textVerbosity: "low",
@@ -553,6 +578,7 @@ describe("buildModelConfigFromForm", () => {
string,
unknown
>;
expect(openai.api_mode).toBe("responses");
expect(openai.reasoning_effort).toBe("medium");
expect(openai.parallel_tool_calls).toBe(false);
expect(openai.text_verbosity).toBe("low");
@@ -571,6 +597,14 @@ describe("buildModelConfigFromForm", () => {
);
});
it("reports error for invalid api mode option", () => {
const result = buildModelConfigFromForm(
"openai",
formWith({ openai: { apiMode: "bogus" } }),
);
expect(result.fieldErrors["openai.apiMode"]).toContain("invalid value");
});
it("reports error for invalid parallel tool calls boolean", () => {
const result = buildModelConfigFromForm(
"openai",
@@ -591,6 +625,15 @@ describe("buildModelConfigFromForm", () => {
);
});
it("omits provider_options when api mode is blank", () => {
const result = buildModelConfigFromForm(
"openai",
formWith({ openai: { apiMode: "" } }),
);
expect(result.fieldErrors).toEqual({});
expect(result.modelConfig).toBeUndefined();
});
it("does not set provider_options when all OpenAI fields are empty", () => {
const result = buildModelConfigFromForm(
"openai",
@@ -600,7 +643,6 @@ describe("buildModelConfigFromForm", () => {
expect(result.modelConfig?.provider_options).toBeUndefined();
});
});
describe("Anthropic / Bedrock provider", () => {
it("builds Anthropic provider options with effort", () => {
const result = buildModelConfigFromForm(