Make “Reasoning Summary” configurable in OpenAI (#157557)

Co-authored-by: cto-new[bot] <140088366+cto-new[bot]@users.noreply.github.com>
Co-authored-by: Joost Lekkerkerker <joostlek@outlook.com>
This commit is contained in:
XHyperDEVX
2026-02-09 16:29:27 +01:00
committed by GitHub
parent 08acececb2
commit 5c3ddcff3e
5 changed files with 41 additions and 1 deletion

View File

@@ -49,6 +49,7 @@ from .const import (
CONF_MAX_TOKENS,
CONF_PROMPT,
CONF_REASONING_EFFORT,
CONF_REASONING_SUMMARY,
CONF_RECOMMENDED,
CONF_TEMPERATURE,
CONF_TOP_P,
@@ -71,6 +72,7 @@ from .const import (
RECOMMENDED_IMAGE_MODEL,
RECOMMENDED_MAX_TOKENS,
RECOMMENDED_REASONING_EFFORT,
RECOMMENDED_REASONING_SUMMARY,
RECOMMENDED_TEMPERATURE,
RECOMMENDED_TOP_P,
RECOMMENDED_VERBOSITY,
@@ -399,10 +401,23 @@ class OpenAISubentryFlowHandler(ConfigSubentryFlow):
mode=SelectSelectorMode.DROPDOWN,
)
),
vol.Optional(
CONF_REASONING_SUMMARY,
default=RECOMMENDED_REASONING_SUMMARY,
): SelectSelector(
SelectSelectorConfig(
options=["off", "auto", "short", "detailed"],
translation_key=CONF_REASONING_SUMMARY,
mode=SelectSelectorMode.DROPDOWN,
)
),
}
)
elif CONF_VERBOSITY in options:
options.pop(CONF_VERBOSITY)
if CONF_REASONING_SUMMARY in options:
if not model.startswith("gpt-5"):
options.pop(CONF_REASONING_SUMMARY)
if self._subentry_type == "conversation" and not model.startswith(
tuple(UNSUPPORTED_WEB_SEARCH_MODELS)

View File

@@ -19,6 +19,7 @@ CONF_FILENAMES = "filenames"
CONF_MAX_TOKENS = "max_tokens"
CONF_PROMPT = "prompt"
CONF_REASONING_EFFORT = "reasoning_effort"
CONF_REASONING_SUMMARY = "reasoning_summary"
CONF_RECOMMENDED = "recommended"
CONF_TEMPERATURE = "temperature"
CONF_TOP_P = "top_p"
@@ -36,6 +37,7 @@ RECOMMENDED_CHAT_MODEL = "gpt-4o-mini"
RECOMMENDED_IMAGE_MODEL = "gpt-image-1.5"
RECOMMENDED_MAX_TOKENS = 3000
RECOMMENDED_REASONING_EFFORT = "low"
RECOMMENDED_REASONING_SUMMARY = "auto"
RECOMMENDED_TEMPERATURE = 1.0
RECOMMENDED_TOP_P = 1.0
RECOMMENDED_VERBOSITY = "medium"

View File

@@ -73,6 +73,7 @@ from .const import (
CONF_IMAGE_MODEL,
CONF_MAX_TOKENS,
CONF_REASONING_EFFORT,
CONF_REASONING_SUMMARY,
CONF_TEMPERATURE,
CONF_TOP_P,
CONF_VERBOSITY,
@@ -90,6 +91,7 @@ from .const import (
RECOMMENDED_IMAGE_MODEL,
RECOMMENDED_MAX_TOKENS,
RECOMMENDED_REASONING_EFFORT,
RECOMMENDED_REASONING_SUMMARY,
RECOMMENDED_TEMPERATURE,
RECOMMENDED_TOP_P,
RECOMMENDED_VERBOSITY,
@@ -501,7 +503,9 @@ class OpenAIBaseLLMEntity(Entity):
)
if not model_args["model"].startswith("gpt-5-pro")
else "high", # GPT-5 pro only supports reasoning.effort: high
"summary": "auto",
"summary": options.get(
CONF_REASONING_SUMMARY, RECOMMENDED_REASONING_SUMMARY
),
}
model_args["include"] = ["reasoning.encrypted_content"]

View File

@@ -67,6 +67,7 @@
"image_model": "[%key:component::openai_conversation::config_subentries::conversation::step::model::data::image_model%]",
"inline_citations": "[%key:component::openai_conversation::config_subentries::conversation::step::model::data::inline_citations%]",
"reasoning_effort": "[%key:component::openai_conversation::config_subentries::conversation::step::model::data::reasoning_effort%]",
"reasoning_summary": "[%key:component::openai_conversation::config_subentries::conversation::step::model::data::reasoning_summary%]",
"search_context_size": "[%key:component::openai_conversation::config_subentries::conversation::step::model::data::search_context_size%]",
"user_location": "[%key:component::openai_conversation::config_subentries::conversation::step::model::data::user_location%]",
"web_search": "[%key:component::openai_conversation::config_subentries::conversation::step::model::data::web_search%]"
@@ -76,6 +77,7 @@
"image_model": "[%key:component::openai_conversation::config_subentries::conversation::step::model::data_description::image_model%]",
"inline_citations": "[%key:component::openai_conversation::config_subentries::conversation::step::model::data_description::inline_citations%]",
"reasoning_effort": "[%key:component::openai_conversation::config_subentries::conversation::step::model::data_description::reasoning_effort%]",
"reasoning_summary": "[%key:component::openai_conversation::config_subentries::conversation::step::model::data_description::reasoning_summary%]",
"search_context_size": "[%key:component::openai_conversation::config_subentries::conversation::step::model::data_description::search_context_size%]",
"user_location": "[%key:component::openai_conversation::config_subentries::conversation::step::model::data_description::user_location%]",
"web_search": "[%key:component::openai_conversation::config_subentries::conversation::step::model::data_description::web_search%]"
@@ -125,6 +127,7 @@
"image_model": "Image generation model",
"inline_citations": "Include links in web search results",
"reasoning_effort": "Reasoning effort",
"reasoning_summary": "Reasoning summary",
"search_context_size": "Search context size",
"user_location": "Include home location",
"web_search": "Enable web search"
@@ -134,6 +137,7 @@
"image_model": "The model to use when generating images",
"inline_citations": "If disabled, additional prompt is added to ask the model to not include source citations",
"reasoning_effort": "How many reasoning tokens the model should generate before creating a response to the prompt",
"reasoning_summary": "Controls the length and detail of reasoning summaries provided by the model",
"search_context_size": "High level guidance for the amount of context window space to use for the search",
"user_location": "Refine search results based on geography",
"web_search": "Allow the model to search the web for the latest information before generating a response"
@@ -165,6 +169,14 @@
"xhigh": "X-High"
}
},
"reasoning_summary": {
"options": {
"auto": "[%key:common::state::auto%]",
"detailed": "Detailed",
"off": "[%key:common::state::off%]",
"short": "Short"
}
},
"search_context_size": {
"options": {
"high": "[%key:common::state::high%]",

View File

@@ -18,6 +18,7 @@ from homeassistant.components.openai_conversation.const import (
CONF_MAX_TOKENS,
CONF_PROMPT,
CONF_REASONING_EFFORT,
CONF_REASONING_SUMMARY,
CONF_RECOMMENDED,
CONF_TEMPERATURE,
CONF_TOP_P,
@@ -36,6 +37,7 @@ from homeassistant.components.openai_conversation.const import (
RECOMMENDED_AI_TASK_OPTIONS,
RECOMMENDED_CHAT_MODEL,
RECOMMENDED_MAX_TOKENS,
RECOMMENDED_REASONING_SUMMARY,
RECOMMENDED_TOP_P,
)
from homeassistant.const import CONF_API_KEY, CONF_LLM_HASS_API
@@ -536,6 +538,7 @@ async def test_form_invalid_auth(hass: HomeAssistant, side_effect, error) -> Non
CONF_TOP_P: 0.9,
CONF_MAX_TOKENS: 1000,
CONF_REASONING_EFFORT: "low",
CONF_REASONING_SUMMARY: "auto",
CONF_VERBOSITY: "high",
CONF_CODE_INTERPRETER: False,
CONF_WEB_SEARCH: False,
@@ -556,6 +559,7 @@ async def test_form_invalid_auth(hass: HomeAssistant, side_effect, error) -> Non
},
{
CONF_REASONING_EFFORT: "minimal",
CONF_REASONING_SUMMARY: RECOMMENDED_REASONING_SUMMARY,
CONF_CODE_INTERPRETER: False,
CONF_VERBOSITY: "high",
CONF_WEB_SEARCH: False,
@@ -572,6 +576,7 @@ async def test_form_invalid_auth(hass: HomeAssistant, side_effect, error) -> Non
CONF_TOP_P: 0.9,
CONF_MAX_TOKENS: 1000,
CONF_REASONING_EFFORT: "minimal",
CONF_REASONING_SUMMARY: RECOMMENDED_REASONING_SUMMARY,
CONF_CODE_INTERPRETER: False,
CONF_VERBOSITY: "high",
CONF_WEB_SEARCH: False,
@@ -739,6 +744,7 @@ async def test_form_invalid_auth(hass: HomeAssistant, side_effect, error) -> Non
CONF_TOP_P: 0.9,
CONF_MAX_TOKENS: 1000,
CONF_REASONING_EFFORT: "low",
CONF_REASONING_SUMMARY: "auto",
CONF_CODE_INTERPRETER: True,
CONF_VERBOSITY: "medium",
CONF_WEB_SEARCH: True,
@@ -771,6 +777,7 @@ async def test_form_invalid_auth(hass: HomeAssistant, side_effect, error) -> Non
CONF_CHAT_MODEL: "gpt-5-pro",
CONF_TOP_P: 0.9,
CONF_MAX_TOKENS: 1000,
CONF_REASONING_SUMMARY: RECOMMENDED_REASONING_SUMMARY,
CONF_VERBOSITY: "medium",
CONF_WEB_SEARCH: True,
CONF_WEB_SEARCH_CONTEXT_SIZE: "high",