From 93b4b3ba7606e71601de2e326bd8bcc50c5898ab Mon Sep 17 00:00:00 2001
From: Junjie <DELL@qq.com>
Date: Fri, 09 Jan 2026 16:31:55 +0800
Subject: [PATCH] Add configurable thinking mode to LLM chat requests

---
 src/main/java/com/zy/ai/service/LlmChatService.java |   17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/src/main/java/com/zy/ai/service/LlmChatService.java b/src/main/java/com/zy/ai/service/LlmChatService.java
index 431896c..ddb333a 100644
--- a/src/main/java/com/zy/ai/service/LlmChatService.java
+++ b/src/main/java/com/zy/ai/service/LlmChatService.java
@@ -38,6 +38,9 @@
     @Value("${llm.pythonPlatformUrl}")
     private String pythonPlatformUrl;
 
+    @Value("${llm.thinking}")
+    private String thinking;
+
     /**
      * 通用对话方法：传入 messages，返回大模型文本回复
      */
@@ -84,13 +87,18 @@
                                                  Double temperature,
                                                  Integer maxTokens,
                                                  List<Object> tools) {
-
         ChatCompletionRequest req = new ChatCompletionRequest();
         req.setModel(model);
         req.setMessages(messages);
         req.setTemperature(temperature != null ? temperature : 0.3);
         req.setMax_tokens(maxTokens != null ? maxTokens : 1024);
         req.setStream(false);
+
+        if(thinking.equals("enable")) {
+            ChatCompletionRequest.Thinking thinking = new ChatCompletionRequest.Thinking();
+            thinking.setType("enable");
+            req.setThinking(thinking);
+        }
         if (tools != null && !tools.isEmpty()) {
             req.setTools(tools);
             req.setTool_choice("auto");
@@ -224,18 +232,21 @@
                                     Consumer<String> onChunk,
                                     Runnable onComplete,
                                     Consumer<Throwable> onError) {
-
         ChatCompletionRequest req = new ChatCompletionRequest();
         req.setModel(model);
         req.setMessages(messages);
         req.setTemperature(temperature != null ? temperature : 0.3);
         req.setMax_tokens(maxTokens != null ? maxTokens : 1024);
         req.setStream(true);
+        if(thinking.equals("enable")) {
+            ChatCompletionRequest.Thinking thinking = new ChatCompletionRequest.Thinking();
+            thinking.setType("enable");
+            req.setThinking(thinking);
+        }
         if (tools != null && !tools.isEmpty()) {
             req.setTools(tools);
             req.setTool_choice("auto");
         }
-
         Flux<String> flux = llmWebClient.post()
                 .uri("/chat/completions")
                 .header(HttpHeaders.AUTHORIZATION, "Bearer " + apiKey)

--
Gitblit v1.9.1