编辑config.toml文件:
toml
# --- Model selection ---
# model_provider = "wenwen"
# model = "gpt-5.2"
model_provider = "glm"
model = "glm-4.7"
model_reasoning_effort = "medium"

# Whether Codex asks for confirmation before sensitive operations
# (e.g. deleting files, installing packages). "never" disables prompts.
approval_policy = "never"

# Sandbox mode. "danger-full-access" gives the model unrestricted access
# to local files and the network — use with care.
sandbox_mode = "danger-full-access"

# Privacy: do not keep conversation history / code context in response
# storage. NOTE(review): the original comment also claimed local logs are
# skipped — confirm against the Codex configuration docs.
disable_response_storage = true

# --- Windows WSL setup (system compatibility) ---
windows_wsl_setup_acknowledged = true
# GLM (Zhipu) provider; API key is read from the GLM_API_KEY env var.
[model_providers.glm]
name = "glm"
base_url = "https://open.bigmodel.cn/api/coding/paas/v4"
env_key = "GLM_API_KEY"
wire_api = "chat"
# wenwen provider; uses the "responses" wire API with OpenAI-style auth.
[model_providers.wenwen]
name = "wenwen"
base_url = "https://code.wenwen-ai.com/v1"
wire_api = "responses"
requires_openai_auth = true
# Local Ollama instance on the default port.
[model_providers.ollama]
name = "Ollama"
base_url = "http://127.0.0.1:11434/v1"
# NOTE(review): this reads an env var literally named "ollama"; Ollama
# normally needs no API key — confirm the variable exists or drop the key.
env_key = "ollama"
# --- Profiles (select one with `codex --profile <name>`) ---
[profiles.use-glm]
model = "glm-4.7"
model_provider = "glm"

[profiles.use-wenwen]
model = "gpt-5.2"
model_provider = "wenwen"

[profiles.use-ollama]
model = "deepseek-r1:8b"
model_provider = "ollama"
# NOTE(review): presumably suppresses the GPT-5.1 migration notice in the
# UI — confirm against the Codex configuration docs.
[notice]
hide_gpt5_1_migration_prompt = true
[features]
# enable_experimental_windows_sandbox = true # legacy key name
experimental_windows_sandbox = true
streamable_shell = true # enable the streamable exec tool
web_search_request = true # allow the model to request web searches
切换模型:
使用智谱的模型:
bash
codex --profile use-glm "你是谁"
使用问问的模型:
bash
codex --profile use-wenwen "你是谁"