Ver Fonte

Merge branch 'bugfix0315' into AuraK-3.0

anhuiqiang há 1 semana atrás
pai
commit
88eae9034a

+ 3 - 0
.gitignore

@@ -54,3 +54,6 @@ server/check_models.js
 server/aurak.sqlite
 server/models_list.json
 server/models_status.json
+libreoffice-server/__pycache__/main.cpython-312.pyc
+nul
+.sisyphus/

+ 96 - 0
docs/DEVELOPMENT_STANDARDS.md

@@ -0,0 +1,96 @@
+# Development Standards
+
+## Code Language Requirements
+
+### 1. Comments
+- **All code comments must be in English**
+- Includes the following (not limited to):
+  - Function/method comments
+  - Inline comments
+  - Code block explanations
+  - TODO/FIXME comments
+
+### 2. Logging
+- **All log output must be in English**
+- Includes the following (not limited to):
+  - `logger.log()` info logs
+  - `logger.warn()` warning logs
+  - `logger.error()` error logs
+  - `console.log()` debug output
+
+### 3. Error Messages
+- **Error messages must support internationalization (i18n)**
+- **User-facing error messages**: Display in the user's selected language (Japanese/Chinese/English) via i18n system
+- **Debug/development error messages**: Display in the user's selected language via i18n system
+- **Exception messages**: Use i18n for internationalized error messages
+
+## Examples
+
+### Correct Comments and Logs (English + i18n)
+
+```typescript
+// Get embeddings for texts
+async getEmbeddings(texts: string[]): Promise<number[][]> {
+  this.logger.log(`Getting embeddings for ${texts.length} texts`);
+  
+  try {
+    // Call API to get embeddings
+    const response = await this.callEmbeddingAPI(texts);
+    return response.data;
+  } catch (error) {
+    this.logger.error('Failed to get embeddings', error);
+    // Use i18n for user-facing error messages
+    throw new Error(this.i18n.t('errors.embeddingGenerationFailed'));
+  }
+}
+```
+
+### Using i18n for Error Messages
+
+```typescript
+import { I18nService } from './i18n.service';
+
+async processDocument(file: Express.Multer.File) {
+  try {
+    // Process document...
+    return result;
+  } catch (error) {
+    // Error message in user's selected language
+    throw new Error(this.i18n.t('errors.documentProcessingFailed', {
+      filename: file.originalname
+    }));
+  }
+}
+```
+
+## Compliance Standards
+
+1. **During code reviews, always check the language of comments and logs**
+2. **New code must follow English comments and logs standards**
+3. **When refactoring existing code, update comments and logs to English simultaneously**
+4. **All error messages must use the i18n system for internationalization**
+
+## Validation i18n Rules
+
+### class-validator Limitation
+The `@MinLength`, `@MaxLength`, `@IsEmail`, etc. decorators from `class-validator` have a **static `message` property** that cannot access NestJS's `I18nService` at runtime. Therefore:
+
+- **DO NOT** use hardcoded messages in validation decorators like:
+  ```typescript
+  @MinLength(8, { message: 'Password must be at least 8 characters long' })
+  ```
+
+- **DO** perform validation in the controller layer with i18n support:
+  ```typescript
  if (password.length < 8) {
    throw new BadRequestException(this.i18nService.getErrorMessage('passwordMinLength'));
  }
+  ```
+
+- **OR** remove the decorator and rely on controller-level validation only
+
+### Adding New Validation Rules
+When adding new validation to DTOs, ensure validation messages are internationalized by:
+1. Adding the i18n key to `server/src/i18n/messages.ts`
+2. Adding validation logic in the controller or service layer using `I18nService`
+

+ 38 - 37
libreoffice-server/main.py

@@ -1,14 +1,15 @@
-from fastapi import FastAPI, File, UploadFile, HTTPException
-from fastapi.responses import RedirectResponse, FileResponse
-from pydantic import BaseModel
-from typing import Optional
-import subprocess
+import io
 import os
+import subprocess
 import time
-from PIL import Image  # Pillowライブラリを追加
-import io
+from typing import Optional
+
+from fastapi import FastAPI, File, HTTPException, UploadFile
+from fastapi.responses import FileResponse, RedirectResponse
+from PIL import Image  # Pillow library for image processing
+from pydantic import BaseModel
 
-# レスポンスモデル
+# Response models
 class ConvertResponse(BaseModel):
     pdf_path: str
     converted: bool
@@ -22,10 +23,10 @@ class HealthResponse(BaseModel):
     version: str
     uptime: float
 
-# FastAPI アプリケーション
+# FastAPI Application
 app = FastAPI(
-    title="LibreOffice ドキュメント変換サービス",
-    description="Word/PPT/Excel/PDF を PDF に変換し、混合内容のドキュメント処理をサポートします",
+    title="LibreOffice Document Conversion Service",
+    description="Convert Word/PPT/Excel/PDF to PDF and support mixed content document processing",
     version="1.0.0",
     docs_url="/docs",
     redoc_url="/redoc"
@@ -35,12 +36,12 @@ start_time = time.time()
 
 @app.get("/", include_in_schema=False)
 async def root():
-    """ドキュメントページへリダイレクト"""
+    """Redirect to documentation page"""
     return RedirectResponse(url="/docs")
 
 @app.get("/health", response_model=HealthResponse)
 async def health():
-    """ヘルスチェックインターフェース"""
+    """Health check interface"""
     return HealthResponse(
         status="healthy",
         service="libreoffice-converter",
@@ -51,11 +52,11 @@ async def health():
 @app.post("/convert")
 async def convert(file: UploadFile = File(...)):
     """
-    ドキュメント変換インターフェース
-    戻り値: PDF ファイルストリーム
+    Document conversion interface
+    Returns: PDF file stream
     """
     try:
-        # ファイル形式の検証
+        # File format validation
         allowed_extensions = [
             '.pdf', '.doc', '.docx', '.ppt', '.pptx', '.xls', '.xlsx',
             '.md', '.txt', '.rtf', '.odt', '.ods', '.odp',
@@ -66,25 +67,25 @@ async def convert(file: UploadFile = File(...)):
         if file_ext not in allowed_extensions:
             raise HTTPException(
                 status_code=400,
-                detail=f"サポートされていないファイル形式です: {file_ext}。サポート対象: {', '.join(allowed_extensions)}"
+                detail=f"Unsupported file format: {file_ext}. Supported formats: {', '.join(allowed_extensions)}"
             )
 
-        # uploads ディレクトリの存在を確認
+        # Check uploads directory existence
         upload_dir = "/app/uploads" if os.path.exists("/app/uploads") else "./uploads"
         os.makedirs(upload_dir, exist_ok=True)
 
-        # アップロードファイルの保存
+        # Save uploaded file
         filepath = os.path.join(upload_dir, file.filename)
         with open(filepath, "wb") as buffer:
             content = await file.read()
             buffer.write(content)
 
-        # PDF の場合はそのまま返却
+        # For PDF files, return directly without conversion
         if file_ext == '.pdf':
             return FileResponse(filepath, filename=file.filename, media_type='application/pdf')
 
         if file_ext == '.md':
-            # Node.js スクリプトを使用して Markdown を PDF にレンダリング
+            # Use Node.js script to render Markdown to PDF
             expected_pdf = filepath.rsplit('.', 1)[0] + '.pdf'
             cmd = [
                 'node',
@@ -93,14 +94,14 @@ async def convert(file: UploadFile = File(...)):
                 expected_pdf
             ]
         elif file_ext in ['.jpg', '.jpeg', '.png', '.bmp', '.gif', '.tiff', '.webp']:
-            # 画像ファイルの場合は Pillow を使用して PDF に変換
+            # For image files, use Pillow to convert to PDF
             expected_pdf = filepath.rsplit('.', 1)[0] + '.pdf'
 
-            # 画像を開いてPDFとして保存
+            # Open image and save as PDF
             with Image.open(filepath) as img:
-                # RGBAモードの場合はRGBに変換(透明度がある画像対応)
+                # Convert RGBA mode to RGB (support for transparent images)
                 if img.mode in ('RGBA', 'LA', 'P'):
-                    # 白い背景に変換
+                    # Convert to white background
                     background = Image.new('RGB', img.size, (255, 255, 255))
                     if img.mode == 'P':
                         img = img.convert('RGBA')
@@ -109,21 +110,21 @@ async def convert(file: UploadFile = File(...)):
                 elif img.mode != 'RGB':
                     img = img.convert('RGB')
 
-                # PDFとして保存
+                # Save as PDF
                 img.save(expected_pdf, 'PDF', resolution=100.0, save_all=False)
 
-            # PDF生成が完了したことを確認
+            # Verify PDF generation completed
             if not os.path.exists(expected_pdf):
                 raise HTTPException(
                     status_code=500,
-                    detail="画像からPDFへの変換は成功しましたが、出力ファイルが見つかりません"
+                    detail="Image to PDF conversion succeeded but output file not found"
                 )
             
-            # 画像変換完了、PDFファイルを返却
+            # Image conversion completed, return PDF file
             filename_base = os.path.splitext(file.filename)[0]
             return FileResponse(expected_pdf, filename=f"{filename_base}.pdf", media_type='application/pdf')
         else:
-            # LibreOffice による変換
+            # Conversion using LibreOffice
             cmd = [
                 'soffice',
                 '--headless',
@@ -136,7 +137,7 @@ async def convert(file: UploadFile = File(...)):
             cmd,
             capture_output=True,
             text=True,
-            timeout=600,  # 複雑なMarkdown変換をサポートするために10分に延長
+            timeout=600,  # Extended to 10 minutes to support complex Markdown conversion
         )
 
         # Combine stdout and stderr for error reporting since capture_output uses PIPE
@@ -144,7 +145,7 @@ async def convert(file: UploadFile = File(...)):
         if result.stderr:
             combined_output += "\n" + result.stderr
 
-        # Node.jsスクリプトの実際の出力を表示して、デバッグ
+        # Display Node.js script output for debugging
         print(f"Node.js script output: {combined_output}")
 
         if result.returncode != 0:
@@ -158,15 +159,15 @@ async def convert(file: UploadFile = File(...)):
             print(f"Subprocess output: {combined_output}")
             raise HTTPException(
                 status_code=500,
-                detail=f"変換に失敗しました: {combined_output}"
+                detail=f"Conversion failed: {combined_output}"
             )
 
-        # 出力ファイルの確認
+        # Verify output file
         expected_pdf = filepath.rsplit('.', 1)[0] + '.pdf'
         if not os.path.exists(expected_pdf):
             raise HTTPException(
                 status_code=500,
-                detail="変換は成功しましたが、出力ファイルが見つかりません"
+                detail="Conversion succeeded but output file not found"
             )
 
         filename_base = os.path.splitext(file.filename)[0]
@@ -175,13 +176,13 @@ async def convert(file: UploadFile = File(...)):
     except HTTPException:
         raise
     except subprocess.TimeoutExpired:
-        raise HTTPException(status_code=504, detail="変換タイムアウト (300秒)")
+        raise HTTPException(status_code=504, detail="Conversion timeout (600 seconds)")
     except Exception as e:
         raise HTTPException(status_code=500, detail=str(e))
 
 @app.get("/version")
 async def version():
-    """バージョン情報"""
+    """Version information"""
     return {
         "service": "libreoffice-converter",
         "version": "1.0.0",

+ 25 - 22
server/src/chat/chat.service.ts

@@ -69,7 +69,7 @@ export class ChatService {
     console.log('User ID:', userId);
     console.log('User language:', userLanguage);
     console.log('Selected embedding model ID:', selectedEmbeddingId);
-    console.log('Selected group:', selectedGroups);
+    console.log('Selected groups:', selectedGroups);
     console.log('Selected files:', selectedFiles);
     console.log('History ID:', historyId);
     console.log('Temperature:', temperature);
@@ -284,13 +284,14 @@ export class ChatService {
     instruction: string,
     context: string,
     modelConfig: ModelConfig,
+    userLanguage: string = DEFAULT_LANGUAGE,
   ): AsyncGenerator<{ type: 'content'; data: any }> {
     try {
       this.logger.log(this.i18nService.formatMessage('modelCall', {
         type: 'LLM (Assist)',
         model: `${modelConfig.name} (${modelConfig.modelId})`,
         user: 'N/A'
-      }, 'ja'));
+      }, userLanguage));
       const llm = new ChatOpenAI({
         apiKey: modelConfig.apiKey || 'ollama',
         streaming: true,
@@ -301,14 +302,13 @@ export class ChatService {
         },
       });
 
-      const systemPrompt = `${this.i18nService.getMessage('intelligentAssistant', 'ja')}
-提供されたテキスト内容を、ユーザーの指示に基づいて修正または改善してください。
-挨拶や結びの言葉(「わかりました、こちらが...」など)は含めず、修正後の内容のみを直接出力してください。
+      const systemPrompt = `${this.i18nService.getMessage('intelligentAssistant', userLanguage)}
+${this.i18nService.getMessage('assistSystemPrompt', userLanguage)}
 
-Context (current contents):
+${this.i18nService.getMessage('contextLabel', userLanguage)}:
 ${context}
 
-User instructions:
+${this.i18nService.getMessage('userInstructionLabel', userLanguage)}:
 ${instruction}`;
 
       const stream = await llm.stream(systemPrompt);
@@ -319,8 +319,8 @@ ${instruction}`;
         }
       }
     } catch (error) {
-      this.logger.error(this.i18nService.getMessage('assistStreamError', 'ja'), error);
-      yield { type: 'content', data: `${this.i18nService.getMessage('error', 'ja')}: ${error.message}` };
+      this.logger.error(this.i18nService.getMessage('assistStreamError', userLanguage), error);
+      yield { type: 'content', data: `${this.i18nService.getMessage('error', userLanguage)}: ${error.message}` };
     }
   }
 
@@ -331,30 +331,31 @@ ${instruction}`;
     selectedGroups?: string[], // New parameter
     explicitFileIds?: string[], // New parameter
     tenantId?: string, // Added
+    userLanguage: string = DEFAULT_LANGUAGE,
   ): Promise<any[]> {
     try {
       // Join keywords into search string
       const combinedQuery = keywords.join(' ');
-      console.log(this.i18nService.getMessage('searchString', 'ja') + combinedQuery);
+      console.log(this.i18nService.getMessage('searchString', userLanguage) + combinedQuery);
 
       // Check if embedding model ID is provided
       if (!embeddingModelId) {
-        console.log(this.i18nService.getMessage('embeddingModelIdNotProvided', 'ja'));
+        console.log(this.i18nService.getMessage('embeddingModelIdNotProvided', userLanguage));
         return [];
       }
 
       // Use actual embedding vector
-      console.log(this.i18nService.getMessage('generatingEmbeddings', 'ja'));
+      console.log(this.i18nService.getMessage('generatingEmbeddings', userLanguage));
       const queryEmbedding = await this.embeddingService.getEmbeddings(
         [combinedQuery],
         userId,
         embeddingModelId,
       );
       const queryVector = queryEmbedding[0];
-      console.log(this.i18nService.getMessage('embeddingsGenerated', 'ja') + this.i18nService.getMessage('dimensions', 'ja') + ':', queryVector.length);
+      console.log(this.i18nService.getMessage('embeddingsGenerated', userLanguage) + this.i18nService.getMessage('dimensions', userLanguage) + ':', queryVector.length);
 
       // Hybrid search
-      console.log(this.i18nService.getMessage('performingHybridSearch', 'ja'));
+      console.log(this.i18nService.getMessage('performingHybridSearch', userLanguage));
       const results = await this.elasticsearchService.hybridSearch(
         queryVector,
         combinedQuery,
@@ -363,13 +364,13 @@ ${instruction}`;
         0.6,
         selectedGroups, // Pass selected groups
         explicitFileIds, // Pass explicit file IDs
-        tenantId, // Added: tenantId
+        tenantId, // Pass tenant ID
       );
-      console.log(this.i18nService.getMessage('esSearchCompleted', 'ja') + this.i18nService.getMessage('resultsCount', 'ja') + ':', results.length);
+      console.log(this.i18nService.getMessage('esSearchCompleted', userLanguage) + this.i18nService.getMessage('resultsCount', userLanguage) + ':', results.length);
 
       return results.slice(0, 10);
     } catch (error) {
-      console.error(this.i18nService.getMessage('hybridSearchFailed', 'ja') + ':', error);
+      console.error(this.i18nService.getMessage('hybridSearchFailed', userLanguage) + ':', error);
       return [];
     }
   }
@@ -398,7 +399,7 @@ ${instruction}`;
       )
       .join('\n');
   }
-  async getContextForTopic(topic: string, userId: string, tenantId?: string, groupId?: string, fileIds?: string[]): Promise<string> {
+  async getContextForTopic(topic: string, userId: string, tenantId?: string, groupId?: string, fileIds?: string[], userLanguage: string = DEFAULT_LANGUAGE): Promise<string> {
     try {
       // Use organization's default embedding from Index Chat Config (strict)
       const embeddingModel = await this.modelConfigService.findDefaultByType(tenantId || 'default', ModelType.EMBEDDING);
@@ -409,12 +410,13 @@ ${instruction}`;
         embeddingModel.id,
         groupId ? [groupId] : undefined,
         fileIds,
-        tenantId
+        tenantId,
+        userLanguage,
       );
 
       return this.buildContext(results);
     } catch (err) {
-      this.logger.error(`${this.i18nService.getMessage('getContextForTopicFailed', 'ja')}: ${err.message}`);
+      this.logger.error(`${this.i18nService.getMessage('getContextForTopicFailed', userLanguage)}: ${err.message}`);
       return '';
     }
   }
@@ -424,6 +426,7 @@ ${instruction}`;
     userId: string,
     tenantId?: string,
     modelConfig?: ModelConfig, // Optional, looks up if not provided
+    userLanguage: string = DEFAULT_LANGUAGE,
   ): Promise<string> {
     try {
       let config = modelConfig;
@@ -454,13 +457,13 @@ ${instruction}`;
 
       return String(response.content);
     } catch (error) {
-      this.logger.error(this.i18nService.getMessage('simpleChatGenerationError', 'ja'), error);
+      this.logger.error(this.i18nService.getMessage('simpleChatGenerationError', userLanguage), error);
       throw error;
     }
   }
 
   /**
-   * 対話内容に基づいてチャットのタイトルを自動生成する
+   * Automatically generate chat title based on conversation content
    */
   async generateChatTitle(historyId: string, userId: string, tenantId?: string): Promise<string | null> {
     this.logger.log(`Generating automatic title for chat session ${historyId}`);

+ 2 - 2
server/src/common/constants.ts

@@ -17,8 +17,8 @@ export const MAX_FILE_SIZE = 100 * 1024 * 1024; // 100MB
 
 export const DEFAULT_MAX_BATCH_SIZE = 2048;
 
-
-export const DEFAULT_LANGUAGE = 'ja';
+// Default language
+export const DEFAULT_LANGUAGE = 'zh';
 
 // システム全体の共通テナントID(シードデータなどで使用)
 export const GLOBAL_TENANT_ID = '00000000-0000-0000-0000-000000000000';

+ 3 - 2
server/src/i18n/i18n.service.ts

@@ -1,10 +1,11 @@
 import { Injectable } from '@nestjs/common';
 import { errorMessages, logMessages, statusMessages } from './messages';
 import { i18nStore } from './i18n.store';
+import { DEFAULT_LANGUAGE } from '../common/constants'; // Default language defined in shared constants
 
 @Injectable()
 export class I18nService {
-  private readonly defaultLanguage = 'ja'; // プロジェクト要件に従い、Japaneseをデフォルトとして使用
+  private readonly defaultLanguage = DEFAULT_LANGUAGE; // Default language defined in shared constants
 
   private getLanguage(lang?: string): string {
     if (lang) return lang;
@@ -62,7 +63,7 @@ export class I18nService {
   // システムプロンプトを取得
   getPrompt(lang: string = this.defaultLanguage, type: 'withContext' | 'withoutContext' = 'withContext', hasKnowledgeGroup: boolean = false): string {
     const language = this.getLanguage(lang);
-    const noMatchMsg = statusMessages[language]?.noMatchInKnowledgeGroup || statusMessages['ja'].noMatchInKnowledgeGroup;
+    const noMatchMsg = statusMessages[language]?.noMatchInKnowledgeGroup || statusMessages[this.defaultLanguage].noMatchInKnowledgeGroup;
 
     if (language === 'zh') {
       return type === 'withContext' ? `

+ 359 - 7
server/src/i18n/messages.ts

@@ -55,6 +55,7 @@ export const errorMessages = {
     promptRequired: '提示词是必填项',
     addLLMConfig: '请在系统设置中添加 LLM 模型',
     visionAnalysisFailed: '视觉分析失败: {message}',
+    visionSystemPrompt: '您是专业的文档分析助手。请分析此文档图像,并按以下要求以 JSON 格式返回:\n\n1. 提取所有可读文本(按阅读顺序,保持段落和格式)\n2. 识别图像/图表/表格(描述内容、含义和作用)\n3. 分析页面布局(仅文本/文本和图像混合/表格/图表等)\n4. 评估分析质量 (0-1)\n\n响应格式:\n{\n  "text": "完整的文本内容",\n  "images": [\n    {"type": "图表类型", "description": "详细描述", "position": 1}\n  ],\n  "layout": "布局说明",\n  "confidence": 0.95\n}',
     retryMechanismError: '重试机制异常',
     imageLoadError: '无法读取图像: {message}',
     groupNotFound: '分组不存在',
@@ -132,6 +133,7 @@ export const errorMessages = {
     promptRequired: 'プロンプトは必須です',
     addLLMConfig: 'システム設定で LLM モデルを追加してください',
     visionAnalysisFailed: 'ビジョン分析に失敗しました: {message}',
+    visionSystemPrompt: 'あなたはプロフェッショナルなドキュメント分析アシスタントです。このドキュメント画像を分析し、以下の要求に従ってJSON形式で返してください:\n\n1. すべての可読テキストを抽出(読み順で、段落と書式を維持)\n2. 画像/グラフ/テーブルを識別(内容、意味、役割を説明)\n3. ページレイアウトを分析(テキストのみ/テキストと画像混在/テーブル/グラフなど)\n4. 分析品質を評価 (0-1)\n\nレスポンス形式:\n{\n  "text": "完全なテキスト内容",\n  "images": [\n    {"type": "グラフタイプ", "description": "詳細説明", "position": 1}\n  ],\n  "layout": "レイアウト説明",\n  "confidence": 0.95\n}',
     retryMechanismError: '再試行メカニズムの異常',
     imageLoadError: '画像を読み込めません: {message}',
     groupNotFound: 'グループが存在しません',
@@ -210,6 +212,7 @@ export const errorMessages = {
     promptRequired: 'Prompt is required',
     addLLMConfig: 'Please add LLM model in system settings',
     visionAnalysisFailed: 'Vision analysis failed: {message}',
+    visionSystemPrompt: 'You are a professional document analysis assistant. Please analyze this document image and return in JSON format according to the following requirements:\n\n1. Extract all readable text (in reading order, preserving paragraphs and formatting)\n2. Identify images/charts/tables (describe content, meaning, and purpose)\n3. Analyze page layout (text only/mixed text and images/tables/charts, etc.)\n4. Evaluate analysis quality (0-1)\n\nResponse format:\n{\n  "text": "complete text content",\n  "images": [\n    {"type": "chart type", "description": "detailed description", "position": 1}\n  ],\n  "layout": "layout description",\n  "confidence": 0.95\n}',
     retryMechanismError: 'Retry mechanism error',
     imageLoadError: 'Cannot load image: {message}',
     groupNotFound: 'Group not found',
@@ -244,19 +247,368 @@ export const errorMessages = {
 };
 
 export const logMessages = {
-  zh: {},
-  ja: {},
-  en: {},
+  zh: {
+    processingFile: '处理文件: {name} ({size})',
+    indexingComplete: '索引完成: {id}',
+    vectorizingFile: '向量化文件: ',
+    searchQuery: '搜索查询: ',
+    modelCall: '[模型调用] 类型: {type}, 模型: {model}, 用户: {user}',
+    memoryStatus: '内存状态: ',
+    uploadSuccess: '文件上传成功。正在后台索引',
+    overlapAdjusted: '重叠大小超过切片大小的50%。已自动调整为 {newSize}',
+    environmentLimit: '环境变量限制',
+    modelLimit: '模型限制',
+    configLoaded: '数据库模型配置加载: {name} ({id})',
+    batchSizeAdjusted: '批量大小从 {old} 调整为 {new} (模型限制: {limit})',
+    dimensionMismatch: '模型 {id} 维度不匹配: 预期 {expected}, 实际 {actual}',
+    searchMetadataFailed: '为用户 {userId} 搜索知识库失败',
+    extractedTextTooLarge: '提取的文本过大: {size}MB',
+    preciseModeUnsupported: '格式 {ext} 不支持精密模式,回退到快速模式',
+    visionModelNotConfiguredFallback: '未配置视觉模型,回退到快速模式',
+    visionModelInvalidFallback: '视觉模型配置无效,回退到快速模式',
+    visionPipelineFailed: '视觉流水线失败,回退到快速模式',
+    preciseModeComplete: '精密模式提取完成: {pages}页, 费用: ${cost}',
+    skippingEmptyVectorPage: '跳过第 {page} 页(空向量)',
+    pdfPageImageError: '获取 PDF 页面图像失败: {message}',
+    internalServerError: '服务器内部错误',
+  },
+  ja: {
+    processingFile: 'ファイル処理中: {name} ({size})',
+    indexingComplete: 'インデックス完了: {id}',
+    vectorizingFile: 'ファイルベクトル化中: ',
+    searchQuery: '検索クエリ: ',
+    modelCall: '[モデル呼び出し] タイプ: {type}, モデル: {model}, ユーザー: {user}',
+    memoryStatus: 'メモリ状態: ',
+    uploadSuccess: 'ファイルが正常にアップロードされました。バックグラウンドでインデックス処理を実行中です',
+    overlapAdjusted: 'オーバーラップサイズがチャンクサイズの50%を超えています。自動的に {newSize} に調整されました',
+    environmentLimit: '環境変数の制限',
+    modelLimit: 'モデルの制限',
+    configLoaded: 'データベースからモデル設定を読み込みました: {name} ({id})',
+    batchSizeAdjusted: 'バッチサイズを {old} から {new} に調整しました (モデル制限: {limit})',
+    dimensionMismatch: 'モデル {id} の次元が一致しません: 期待値 {expected}, 実際 {actual}',
+    searchMetadataFailed: 'ユーザー {userId} のナレッジベース検索に失敗しました',
+    extractedTextTooLarge: '抽出されたテキストが大きいです: {size}MB',
+    preciseModeUnsupported: 'ファイル形式 {ext} は精密モードをサポートしていません。高速モードにフォールバックします',
+    visionModelNotConfiguredFallback: 'ビジョンモデルが設定されていません。高速モードにフォールバックします',
+    visionModelInvalidFallback: 'ビジョンモデルの設定が無効です。高速モードにフォールバックします',
+    visionPipelineFailed: 'ビジョンパイプラインが失敗しました。高速モードにフォールバックします',
+    preciseModeComplete: '精密モード内容抽出完了: {pages}ページ, コスト: ${cost}',
+    skippingEmptyVectorPage: '第 {page} ページの空ベクトルをスキップします',
+    pdfPageImageError: 'PDF ページの画像取得に失敗しました: {message}',
+    internalServerError: 'サーバー内部エラー',
+  },
+  en: {
+    processingFile: 'Processing file: {name} ({size})',
+    indexingComplete: 'Indexing complete: {id}',
+    vectorizingFile: 'Vectorizing file: ',
+    searchQuery: 'Search query: ',
+    modelCall: '[Model call] Type: {type}, Model: {model}, User: {user}',
+    memoryStatus: 'Memory status: ',
+    uploadSuccess: 'File uploaded successfully. Indexing in background',
+    overlapAdjusted: 'Overlap size exceeds 50% of chunk size. Auto-adjusted to {newSize}',
+    environmentLimit: 'Environment variable limit',
+    modelLimit: 'Model limit',
+    configLoaded: 'Model config loaded from DB: {name} ({id})',
+    batchSizeAdjusted: 'Batch size adjusted from {old} to {new} (Model limit: {limit})',
+    dimensionMismatch: 'Model {id} dimension mismatch: Expected {expected}, Actual {actual}',
+    searchMetadataFailed: 'Failed to search knowledge base for user {userId}',
+    extractedTextTooLarge: 'Extracted text is too large: {size}MB',
+    preciseModeUnsupported: 'Format {ext} not supported for precise mode. Falling back to fast mode',
+    visionModelNotConfiguredFallback: 'Vision model not configured. Falling back to fast mode',
+    visionModelInvalidFallback: 'Vision model config invalid. Falling back to fast mode',
+    visionPipelineFailed: 'Vision pipeline failed. Falling back to fast mode',
+    preciseModeComplete: 'Precise mode extraction complete: {pages} pages, cost: ${cost}',
+    skippingEmptyVectorPage: 'Skipping page {page} due to empty vector',
+    pdfPageImageError: 'Failed to retrieve PDF page image: {message}',
+    internalServerError: 'Internal server error',
+  }
 };
 
 export const statusMessages = {
   zh: {
-    noMatchInKnowledgeGroup: '所选知识组中未找到相关内容',
+    searching: '正在搜索知识库...',
+    noResults: '未找到相关知识,将基于一般知识回答...',
+    searchFailed: '知识库搜索失败,将基于一般知识回答...',
+    generatingResponse: '正在生成回答',
+    files: '个文件',
+    notebooks: '个笔记本',
+    all: '全部',
+    items: '个',
+    searchResults: '搜索结果',
+    relevantInfoFound: '条相关信息找到',
+    searchHits: '搜索命中',
+    relevance: '相关度',
+    sourceFiles: '源文件',
+    searchScope: '搜索范围',
+    error: '错误',
+    creatingHistory: '创建新对话历史: ',
+    searchingModelById: '根据ID搜索模型: ',
+    searchModelFallback: '未找到指定的嵌入模型。使用第一个可用模型。',
+    noEmbeddingModelFound: '找不到嵌入模型设置',
+    usingEmbeddingModel: '使用的嵌入模型: ',
+    startingSearch: '开始搜索知识库...',
+    searchResultsCount: '搜索结果数: ',
+    searchFailedLog: '搜索失败',
+    modelCall: '[模型调用]',
+    chatStreamError: '聊天流错误',
+    assistStreamError: '辅助流错误',
+    file: '文件',
+    content: '内容',
+    userLabel: '用户',
+    assistantLabel: '助手',
+    intelligentAssistant: '您是智能写作助手。',
+    assistSystemPrompt: '请根据用户的指示修正或改进提供的文本内容。不要包含问候语或结束语(如"明白了,这是..."等),直接输出修正后的内容。',
+    contextLabel: '上下文(当前内容)',
+    userInstructionLabel: '用户指示',
+    searchString: '搜索字符串: ',
+    embeddingModelIdNotProvided: '未提供嵌入模型ID',
+    generatingEmbeddings: '生成嵌入向量...',
+    embeddingsGenerated: '嵌入向量生成完成',
+    dimensions: '维度',
+    performingHybridSearch: '执行混合搜索...',
+    esSearchCompleted: 'ES搜索完成',
+    resultsCount: '结果数',
+    hybridSearchFailed: '混合搜索失败',
+    getContextForTopicFailed: '获取主题上下文失败',
+    noLLMConfigured: '用户未配置LLM模型',
+    simpleChatGenerationError: '简单聊天生成错误',
+    noMatchInKnowledgeGroup: '所选知识组中未找到相关内容,以下是基于模型的一般性回答:',
+    uploadTextSuccess: '笔记内容已接收。正在后台索引',
+    passwordChanged: '密码已成功修改',
+    userCreated: '用户已成功创建',
+    userInfoUpdated: '用户信息已更新',
+    userDeleted: '用户已删除',
+    pdfNoteTitle: 'PDF 笔记 - {date}',
+    noTextExtracted: '未提取到文本',
+    kbCleared: '知识库已清空',
+    fileDeleted: '文件已删除',
+    pageImageNotFoundDetail: '无法获取 PDF 第 {page} 页的图像',
+    groupSyncSuccess: '文件分组已更新',
+    fileDeletedFromGroup: '文件已从分组中删除',
+    chunkConfigCorrection: '切片配置已修正: {warnings}',
+    noChunksGenerated: '文件 {id} 未生成任何切片',
+    chunkCountAnomaly: '实际切片数 {actual} 大幅超过预计值 {estimated},可能存在异常',
+    batchSizeExceeded: '批次 {index} 的大小 {actual} 超过推荐值 {limit},将拆分处理',
+    skippingEmptyVectorChunk: '跳过文本块 {index} (空向量)',
+    contextLengthErrorFallback: '批次处理发生上下文长度错误,降级到逐条处理模式',
+    chunkLimitExceededForceBatch: '切片数 {actual} 超过模型批次限制 {limit},强制进行批次处理',
+    noteContentRequired: '笔记内容是必填项',
+    imageAnalysisStarted: '正在使用模型 {id} 分析图像...',
+    batchAnalysisStarted: '正在分析 {count} 张图像...',
+    pageAnalysisFailed: '第 {page} 页分析失败',
+    visionSystemPrompt: '您是专业的文档分析助手。请分析此文档图像,并按以下要求以 JSON 格式返回:\n\n1. 提取所有可读文本(按阅读顺序,保持段落和格式)\n2. 识别图像/图表/表格(描述内容、含义和作用)\n3. 分析页面布局(仅文本/文本和图像混合/表格/图表等)\n4. 评估分析质量 (0-1)\n\n响应格式:\n{\n  "text": "完整的文本内容",\n  "images": [\n    {"type": "图表类型", "description": "详细描述", "position": 1}\n  ],\n  "layout": "布局说明",\n  "confidence": 0.95\n}',
+    visionModelCall: '[模型调用] 类型: Vision, 模型: {model}, 页面: {page}',
+    visionAnalysisSuccess: '✅ 视觉分析完成: {path}{page}, 文本长度: {textLen}, 图像数: {imgCount}, 布局: {layout}, 置信度: {confidence}%',
+    conversationHistoryNotFound: '对话历史不存在',
+    batchContextLengthErrorFallback: '小文件批次处理发生上下文长度错误,降级到逐条处理模式',
+    chunkProcessingFailed: '处理文本块 {index} 失败,已跳过: {message}',
+    singleTextProcessingComplete: '逐条文本处理完成: {count} 个切片',
+    fileVectorizationComplete: '文件 {id} 向量化完成。共处理 {count} 个文本块。最终内存: {memory}MB',
+    fileVectorizationFailed: '文件 {id} 向量化失败',
+    batchProcessingStarted: '开始批次处理: {count} 个项目',
+    batchProcessingProgress: '正在处理批次 {index}/{total}: {count} 个项目',
+    batchProcessingComplete: '批次处理完成: {count} 个项目,耗时 {duration}s',
+    onlyFailedFilesRetryable: '仅允许重试失败的文件 (当前状态: {status})',
+    emptyFileRetryFailed: '文件内容为空,无法重试。请重新上传文件。',
+    ragSystemPrompt: '您是专业的知识库助手。请根据以下提供的文档内容回答用户的问题。',
+    ragRules: '## 规则:\n1. 仅根据提供的文档内容进行回答,请勿编造信息。\n2. 如果文档中没有相关信息,请告知用户。\n3. 请在回答中注明信息来源。格式:[文件名.扩展名]\n4. 如果多个文档中的信息存在矛盾,请进行综合分析或解释不同的观点。\n5. 请使用{lang}进行回答。',
+    ragDocumentContent: '## 文档内容:',
+    ragUserQuestion: '## 用户问题:',
+    ragAnswer: '## 回答:',
+    ragSource: '### 来源:{fileName}',
+    ragSegment: '片段 {index} (相似度: {score}):',
+    ragNoDocumentFound: '未找到相关文档。',
+    queryExpansionPrompt: '您是一个搜索助手。请为以下用户查询生成3个不同的演变版本,以帮助在向量搜索中获得更好的结果。每个版本应包含不同的关键词或表达方式,但保持原始意思。直接输出3行查询,不要有数字或编号:\n\n查询:{query}',
+    hydePrompt: '请为以下用户问题写一段简短、事实性的假设回答(约100字)。不要包含任何引导性文字(如“基于我的分析...”),直接输出答案内容。\n\n问题:{query}',
   },
   ja: {
-    noMatchInKnowledgeGroup: '選択された知識グループに関連する内容が見つかりませんでした',
+    searching: 'ナレッジベースを検索中...',
+    noResults: '関連する知識が見つかりませんでした。一般的な知識に基づいて回答します...',
+    searchFailed: 'ナレッジベース検索に失敗しました。一般的な知識に基づいて回答します...',
+    generatingResponse: '回答を生成中',
+    files: '個のファイル',
+    notebooks: '個のノートブック',
+    all: 'すべて',
+    items: '件',
+    searchResults: '検索結果',
+    relevantInfoFound: '件の関連情報が見つかりました',
+    searchHits: '検索ヒット',
+    relevance: '関連度',
+    sourceFiles: '元ファイル',
+    searchScope: '検索範囲',
+    error: 'エラー',
+    creatingHistory: '新規対話履歴を作成: ',
+    searchingModelById: 'selectedEmbeddingId に基づいてモデルを検索: ',
+    searchModelFallback: '指定された埋め込みモデルが見つかりません。最初に使用可能なモデルを使用します。',
+    noEmbeddingModelFound: '埋め込みモデルの設定が見つかりません',
+    usingEmbeddingModel: '使用する埋め込みモデル: ',
+    startingSearch: 'ナレッジベースの検索を開始...',
+    searchResultsCount: '検索結果数: ',
+    searchFailedLog: '検索失敗',
+    chatStreamError: 'チャットストリームエラー',
+    assistStreamError: 'アシストストリームエラー',
+    file: 'ファイル',
+    content: '内容',
+    userLabel: 'ユーザー',
+    assistantLabel: 'アシスタント',
+    intelligentAssistant: 'あなたはインテリジェントな執筆アシスタントです。',
+    assistSystemPrompt: '提供されたテキスト内容を、ユーザーの指示に基づいて修正または改善してください。挨拶や結びの言葉(「わかりました、こちらが...」など)は含めず、修正後の内容のみを直接出力してください。',
+    contextLabel: 'コンテキスト(現在の内容)',
+    userInstructionLabel: 'ユーザーの指示',
+    searchString: '検索文字列: ',
+    embeddingModelIdNotProvided: '埋め込みモデルIDが提供されていません',
+    generatingEmbeddings: '埋め込みベクトルを生成中...',
+    embeddingsGenerated: '埋め込みベクトルの生成が完了しました',
+    dimensions: '次元数',
+    performingHybridSearch: 'ES 混合検索を実行中...',
+    esSearchCompleted: 'ES 検索が完了しました',
+    resultsCount: '結果数',
+    hybridSearchFailed: '混合検索に失敗しました',
+    getContextForTopicFailed: 'トピックのコンテキスト取得に失敗しました',
+    noLLMConfigured: 'ユーザーにLLMモデルが設定されていません',
+    simpleChatGenerationError: '簡易チャット生成エラー',
+    noMatchInKnowledgeGroup: '選択された知識グループに関連する内容が見つかりませんでした。以下はモデルに基づく一般的な回答です:',
+    uploadTextSuccess: 'ノート内容を受け取りました。バックグラウンドでインデックス処理を実行中です',
+    passwordChanged: 'パスワードが正常に変更されました',
+    userCreated: 'ユーザーが正常に作成されました',
+    userInfoUpdated: 'ユーザー情報が更新されました',
+    userDeleted: 'ユーザーが削除されました',
+    pdfNoteTitle: 'PDF ノート - {date}',
+    noTextExtracted: 'テキストが抽出されませんでした',
+    kbCleared: 'ナレッジベースが空になりました',
+    fileDeleted: 'ファイルが削除されました',
+    pageImageNotFoundDetail: 'PDF の第 {page} ページの画像を取得できません',
+    groupSyncSuccess: 'ファイルグループが更新されました',
+    fileDeletedFromGroup: 'ファイルがグループから削除されました',
+    chunkConfigCorrection: 'チャンク設定の修正: {warnings}',
+    noChunksGenerated: 'ファイル {id} からテキストチャンクが生成されませんでした',
+    chunkCountAnomaly: '実際のチャンク数 {actual} が推定値 {estimated} を大幅に超えています。異常がある可能性があります',
+    batchSizeExceeded: 'バッチ {index} のサイズ {actual} が推奨値 {limit} を超えています。分割して処理します',
+    skippingEmptyVectorChunk: '空ベクトルのテキストブロック {index} をスキップします',
+    contextLengthErrorFallback: 'バッチ処理でコンテキスト長エラーが発生しました。単一テキスト処理モードにダウングレードします',
+    chunkLimitExceededForceBatch: 'チャンク数 {actual} がモデルのバッチ制限 {limit} を超えています。強制的にバッチ処理を行います',
+    noteContentRequired: 'ノート内容は必須です',
+    imageAnalysisStarted: 'モデル {id} で画像を分析中...',
+    batchAnalysisStarted: '{count} 枚の画像を分析中...',
+    pageAnalysisFailed: '第 {page} ページの分析に失敗しました',
+    visionSystemPrompt: 'あなたは専門的なドキュメント分析アシスタントです。このドキュメント画像を分析し、以下の要求に従って JSON 形式で返してください:\n\n1. すべての読み取り可能なテキストを抽出(読み取り順序に従い、段落と形式を保持)\n2. 画像/グラフ/表の識別(内容、意味、役割を記述)\n3. ページレイアウトの分析(テキストのみ/テキストと画像の混合/表/グラフなど)\n4. 分析品質の評価(0-1)\n\nレスポンス形式:\n{\n  "text": "完全なテキスト内容",\n  "images": [\n    {"type": "グラフの種類", "description": "詳細な記述", "position": 1}\n  ],\n  "layout": "レイアウトの説明",\n  "confidence": 0.95\n}',
+    visionModelCall: '[モデル呼び出し] タイプ: Vision, モデル: {model}, ページ: {page}',
+    visionAnalysisSuccess: '✅ Vision 分析完了: {path}{page}, テキスト長: {textLen}文字, 画像数: {imgCount}, レイアウト: {layout}, 信頼度: {confidence}%',
+    conversationHistoryNotFound: '会話履歴が存在しません',
+    batchContextLengthErrorFallback: '小ファイルバッチ処理でコンテキスト長エラーが発生しました。単一テキスト処理モードにダウングレードします',
+    chunkProcessingFailed: 'テキストブロック {index} の処理に失敗しました。スキップします: {message}',
+    singleTextProcessingComplete: '単一テキスト処理完了: {count} チャンク',
+    fileVectorizationComplete: 'ファイル {id} ベクトル化完了。{count} 個のテキストブロックを処理しました。最終メモリ: {memory}MB',
+    fileVectorizationFailed: 'ファイル {id} ベクトル化失敗',
+    batchProcessingStarted: 'バッチ処理を開始します: {count} アイテム',
+    batchProcessingProgress: 'バッチ {index}/{total} を処理中: {count} 個のアイテム',
+    batchProcessingComplete: 'バッチ処理完了: {count} アイテム, 所要時間 {duration}s',
+    onlyFailedFilesRetryable: '失敗したファイルのみ再試行可能です (現在のステータス: {status})',
+    emptyFileRetryFailed: 'ファイル内容が空です。再試行できません。ファイルを再アップロードしてください。',
+    ragSystemPrompt: 'あなたは専門的なナレッジベースアシスタントです。以下の提供されたドキュメントの内容に基づいて、ユーザーの質問に答えてください。',
+    ragRules: '## ルール:\n1. 提供されたドキュメントの内容のみに基づいて回答し、情報を捏造しないでください。\n2. ドキュメントに関連情報がない場合は、その旨をユーザーに伝えてください。\n3. 回答には情報源を明記してください。形式:[ファイル名.拡張子]\n4. 複数のドキュメントで情報が矛盾している場合は、総合的に分析するか、異なる視点を説明してください。\n5. {lang}で回答してください。',
+    ragDocumentContent: '## ドキュメント内容:',
+    ragUserQuestion: '## ユーザーの質問:',
+    ragAnswer: '## 回答:',
+    ragSource: '### ソース:{fileName}',
+    ragSegment: 'セグメント {index} (類似度: {score}):',
+    ragNoDocumentFound: '関連するドキュメントが見つかりませんでした。',
+    queryExpansionPrompt: 'あなたは検索アシスタントです。以下のユーザーのクエリに対して、ベクトル検索でより良い結果を得るために、3つの異なるバリエーションを生成してください。各バリエーションは異なるキーワードや表現を使用しつつ、元の意味を維持する必要があります。数字やプレフィックスなしで、3行のクエリを直接出力してください:\n\nクエリ:{query}',
+    hydePrompt: '以下のユーザーの質問に対して、簡潔で事実に基づいた仮説的な回答(約200文字)を書いてください。「私の分析によると...」などの導入文は含めず、回答内容のみを直接出力してください。\n\n質問:{query}',
   },
   en: {
-    noMatchInKnowledgeGroup: 'No relevant content found in the selected knowledge group',
-  },
+    searching: 'Searching knowledge base...',
+    noResults: 'No relevant knowledge found, will answer based on general knowledge...',
+    searchFailed: 'Knowledge base search failed, will answer based on general knowledge...',
+    generatingResponse: 'Generating response',
+    files: ' files',
+    notebooks: ' notebooks',
+    all: 'all',
+    items: '',
+    searchResults: 'Search results',
+    relevantInfoFound: ' relevant info found',
+    searchHits: 'Search hits',
+    relevance: 'Relevance',
+    sourceFiles: 'Source files',
+    searchScope: 'Search scope',
+    error: 'Error',
+    creatingHistory: 'Creating new chat history: ',
+    searchingModelById: 'Searching model by ID: ',
+    searchModelFallback: 'Specified embedding model not found. Using first available model.',
+    noEmbeddingModelFound: 'No embedding model settings found',
+    usingEmbeddingModel: 'Using embedding model: ',
+    startingSearch: 'Starting knowledge base search...',
+    searchResultsCount: 'Search results count: ',
+    searchFailedLog: 'Search failed',
+    chatStreamError: 'Chat stream error',
+    assistStreamError: 'Assist stream error',
+    file: 'File',
+    content: 'Content',
+    userLabel: 'User',
+    assistantLabel: 'Assistant',
+    intelligentAssistant: 'You are an intelligent writing assistant.',
+    assistSystemPrompt: 'Please revise or improve the provided text content based on the user\'s instructions. Do not include greetings or closing phrases (such as "Understood, here is..." etc.), output only the revised content directly.',
+    contextLabel: 'Context (current content)',
+    userInstructionLabel: 'User instructions',
+    searchString: 'Search string: ',
+    embeddingModelIdNotProvided: 'Embedding model ID not provided',
+    generatingEmbeddings: 'Generating embeddings...',
+    embeddingsGenerated: 'Embeddings generated successfully',
+    dimensions: 'dimensions',
+    performingHybridSearch: 'Performing hybrid search...',
+    esSearchCompleted: 'ES search completed',
+    resultsCount: 'Results count',
+    hybridSearchFailed: 'Hybrid search failed',
+    getContextForTopicFailed: 'getContextForTopic failed',
+    noLLMConfigured: 'No LLM model configured for user',
+    simpleChatGenerationError: 'Simple chat generation error',
+    noMatchInKnowledgeGroup: 'No relevant content found in the selected knowledge group. The following is a general answer based on the model:',
+    uploadTextSuccess: 'Note content received. Indexing in background',
+    passwordChanged: 'Password changed successfully',
+    userCreated: 'User created successfully',
+    userInfoUpdated: 'User information updated',
+    userDeleted: 'User deleted',
+    pdfNoteTitle: 'PDF Note - {date}',
+    noTextExtracted: 'No text extracted',
+    kbCleared: 'Knowledge base cleared',
+    fileDeleted: 'File deleted',
+    pageImageNotFoundDetail: 'Could not retrieve image for PDF page {page}',
+    groupSyncSuccess: 'File groups updated',
+    fileDeletedFromGroup: 'File removed from group',
+    chunkConfigCorrection: 'Chunk config corrected: {warnings}',
+    noChunksGenerated: 'No chunks generated for file {id}',
+    chunkCountAnomaly: 'Actual chunk count {actual} significantly exceeds estimate {estimated}. Possible anomaly.',
+    batchSizeExceeded: 'Batch {index} size {actual} exceeds recommended limit {limit}. Splitting for processing.',
+    skippingEmptyVectorChunk: 'Skipping text block {index} due to empty vector',
+    contextLengthErrorFallback: 'Context length error occurred during batch processing. Downgrading to single processing mode.',
+    chunkLimitExceededForceBatch: 'Chunk count {actual} exceeds model batch limit {limit}. Forcing batch processing.',
+    noteContentRequired: 'Note content is required',
+    imageAnalysisStarted: 'Analyzing image with model {id}...',
+    batchAnalysisStarted: 'Batch analyzing {count} images...',
+    pageAnalysisFailed: 'Failed to analyze page {page}',
+    visionSystemPrompt: 'You are a professional document analysis assistant. Analyze this document image and return in JSON format according to these requirements:\n\n1. Extract all readable text (follow reading order, maintain paragraphs and formatting)\n2. Identify images/graphs/tables (describe content, meaning, and role)\n3. Analyze page layout (text only/mixed/table/graph, etc.)\n4. Evaluate analysis quality (0-1)\n\nResponse format:\n{\n  "text": "full text content",\n  "images": [\n    {"type": "graph type", "description": "detailed description", "position": 1}\n  ],\n  "layout": "layout description",\n  "confidence": 0.95\n}',
+    visionModelCall: '[Model Call] Type: Vision, Model: {model}, Page: {page}',
+    visionAnalysisSuccess: '✅ Vision analysis complete: {path}{page}, Text length: {textLen}, Images: {imgCount}, Layout: {layout}, Confidence: {confidence}%',
+    conversationHistoryNotFound: 'Conversation history not found',
+    batchContextLengthErrorFallback: 'Context length error occurred during small file batch processing. Downgrading to single processing mode.',
+    chunkProcessingFailed: 'Failed to process text block {index}. Skipping: {message}',
+    singleTextProcessingComplete: 'Single text processing complete: {count} chunks',
+    fileVectorizationComplete: 'File {id} vectorization complete. Processed {count} text blocks. Final memory: {memory}MB',
+    fileVectorizationFailed: 'File {id} vectorization failed',
+    batchProcessingStarted: 'Batch processing started: {count} items',
+    batchProcessingProgress: 'Processing batch {index}/{total}: {count} items',
+    batchProcessingComplete: 'Batch processing complete: {count} items in {duration}s',
+    onlyFailedFilesRetryable: 'Only failed files can be retried (current status: {status})',
+    emptyFileRetryFailed: 'File content is empty. Cannot retry. Please re-upload the file.',
+    ragSystemPrompt: 'You are a professional knowledge base assistant. Please answer the user\'s question based on the provided document content below.',
+    ragRules: '## Rules:\n1. Answer based only on the provided document content; do not fabricate information.\n2. If there is no relevant information in the documents, please inform the user.\n3. Clearly state the sources in your answer. Format: [filename.ext]\n4. If information in different documents is contradictory, analyze it comprehensively or explain the different perspectives.\n5. Please answer in {lang}.',
+    ragDocumentContent: '## Document Content:',
+    ragUserQuestion: '## User Question:',
+    ragAnswer: '## Answer:',
+    ragSource: '### Source: {fileName}',
+    ragSegment: 'Segment {index} (Similarity: {score}):',
+    ragNoDocumentFound: 'No relevant documents found.',
+    queryExpansionPrompt: 'You are a search assistant. Please generate 3 different variations of the following user query to help get better results in vector search. Each variation should use different keywords or phrasing while maintaining the original meaning. Output the 3 queries directly as 3 lines, without numbers or prefixes:\n\nQuery: {query}',
+    hydePrompt: 'Please write a brief, factual hypothetical answer (about 100 words) to the following user question. Do not include any introductory text (like "Based on my analysis..."), just output the answer content directly.\n\nQuestion: {query}',
+  }
 };

+ 2 - 1
server/src/knowledge-base/knowledge-base.service.ts

@@ -529,7 +529,8 @@ export class KnowledgeBaseService {
       });
 
     } catch (error) {
-      this.logger.error(`Vision pipeline error: ${error.message}, falling back to fast mode`);
+      this.logger.error(`Vision pipeline error: ${error.message}`, error.stack);
+      this.logger.error(`Falling back to fast mode for file ${kb.id}`);
       return this.processFastMode(kb, userId, tenantId, config);
     }
   }

+ 3 - 0
server/src/tenant/tenant.service.ts

@@ -85,6 +85,9 @@ export class TenantService {
                 if (setting.selectedRerankId && !data.enabledModelIds.includes(setting.selectedRerankId)) {
                     data.selectedRerankId = null as any;
                 }
+                if (setting.selectedVisionId && !data.enabledModelIds.includes(setting.selectedVisionId)) {
+                    data.selectedVisionId = null as any;
+                }
             }
             Object.assign(setting, data);
         }

+ 1 - 2
server/src/user/dto/update-user.dto.ts

@@ -1,4 +1,4 @@
-import { IsBoolean, IsOptional, IsString, MinLength, IsEnum } from 'class-validator';
+import { IsBoolean, IsOptional, IsString, IsEnum } from 'class-validator';
 import { ApiPropertyOptional } from '@nestjs/swagger';
 import { UserRole } from '../user-role.enum';
 
@@ -18,6 +18,5 @@ export class UpdateUserDto {
 
   @IsOptional()
   @IsString()
-  @MinLength(6)
   password?: string;
 }

+ 5 - 0
server/src/user/user.controller.ts

@@ -202,6 +202,11 @@ export class UserController {
       }
     }
 
+    // Validate password length if provided
+    if (body.password && body.password.length < 6) {
+      throw new BadRequestException(this.i18nService.getErrorMessage('passwordMinLength'));
+    }
+
     return this.userService.updateUser(id, body);
   }
 

+ 18 - 7
server/src/vision-pipeline/vision-pipeline.service.ts

@@ -55,17 +55,28 @@ export class VisionPipelineService {
       // Step 1: Unification of formats
       this.logger.log('📄 Step 1/4: Unification of formats');
       this.updateStatus('converting', 10, 'Converting document format...');
-      pdfPath = await this.convertToPDF(filePath);
-      this.logger.log(`✅ Format conversion completed: ${pdfPath}`);
+      try {
+        pdfPath = await this.convertToPDF(filePath);
+        this.logger.log(`✅ Format conversion completed: ${pdfPath}`);
+      } catch (convertError) {
+        this.logger.error(`❌ Format conversion failed: ${convertError.message}`);
+        throw convertError;
+      }
 
       // Step 2: Conversion from PDF to images
       this.logger.log('🖼️  Step 2/4: Conversion from PDF to images');
       this.updateStatus('splitting', 30, 'Converting PDF to images...');
-      const conversionResult = await this.pdf2Image.convertToImages(pdfPath, {
-        density: 300,
-        quality: 85,
-        format: 'jpeg',
-      });
+      let conversionResult;
+      try {
+        conversionResult = await this.pdf2Image.convertToImages(pdfPath, {
+          density: 300,
+          quality: 85,
+          format: 'jpeg',
+        });
+      } catch (imageError) {
+        this.logger.error(`❌ PDF to image conversion failed: ${imageError.message}`);
+        throw imageError;
+      }
 
       if (conversionResult.images.length === 0) {
         throw new Error(this.i18nService.getMessage('pdfToImageConversionFailed'));

+ 6 - 0
server/src/vision/vision.service.ts

@@ -135,6 +135,12 @@ export class VisionService {
 
       return result;
     } catch (error) {
+      this.logger.error(
+        this.i18nService.formatMessage('visionAnalysisFailed', { 
+          message: error.message 
+        })
+      );
+      this.logger.error(`Vision analysis error details: ${error.stack}`);
       throw error; // Re-throw error for retry mechanism
     }
   }

+ 2 - 2
web/components/ConfigPanel.tsx

@@ -63,7 +63,7 @@ const ConfigPanel: React.FC<ConfigPanelProps> = ({ settings, models, onSettingsC
                 disabled={!isAdmin}
                 className="w-full text-sm bg-slate-50 border border-slate-200 rounded-lg px-3 py-2 text-slate-700 focus:outline-none focus:border-blue-500 disabled:opacity-50 disabled:cursor-not-allowed"
               >
-                <option value="">{t('selectLLMModel')}</option>
+                <option value="">--- {t('selectLLMModel')} ---</option>
                 {llmModels.map(m => (
                   <option key={m.id} value={m.id}>
                     {m.name} ({m.modelId})
@@ -192,7 +192,7 @@ const ConfigPanel: React.FC<ConfigPanelProps> = ({ settings, models, onSettingsC
                 disabled={!settings.enableRerank || !isAdmin}
                 className="w-full text-sm bg-slate-50 border border-slate-200 rounded-lg px-3 py-2 text-slate-700 focus:outline-none focus:border-blue-500 disabled:opacity-50 disabled:cursor-not-allowed"
               >
-                <option value="">--- {t('noRerankModel')} ---</option>
+                <option value="">--- {t('selectRerankModel')} ---</option>
                 {rerankModels.map(m => (
                   <option key={m.id} value={m.id}>{m.name}</option>
                 ))}

+ 7 - 1
web/components/SettingsModal.tsx

@@ -251,6 +251,7 @@ export const SettingsModal: React.FC<SettingsModalProps> = ({
             case ModelType.LLM: return t('typeLLM');
             case ModelType.EMBEDDING: return t('typeEmbedding');
             case ModelType.RERANK: return t('typeRerank');
+            case ModelType.VISION: return t('typeVision');
         }
     };
 
@@ -290,20 +291,23 @@ export const SettingsModal: React.FC<SettingsModalProps> = ({
                     {t('changePassword')}
                 </h3>
                 <form onSubmit={handleChangePassword} className="space-y-4 max-w-sm">
+                    <input type="hidden" name="username" value="current-user" autoComplete="username" />
                     <div>
                         <input
                             type="password"
+                            name="currentPassword"
                             placeholder={t('currentPassword')}
                             value={passwordForm.current}
                             onChange={e => setPasswordForm({ ...passwordForm, current: e.target.value })}
                             className="w-full px-3 py-2 text-sm border border-slate-300 rounded-md focus:ring-2 focus:ring-blue-500 outline-none"
                             required
-                            autoComplete="new-password"
+                            autoComplete="current-password"
                         />
                     </div>
                     <div>
                         <input
                             type="password"
+                            name="newPassword"
                             placeholder={t('newPassword')}
                             value={passwordForm.new}
                             onChange={e => setPasswordForm({ ...passwordForm, new: e.target.value })}
@@ -315,6 +319,7 @@ export const SettingsModal: React.FC<SettingsModalProps> = ({
                     <div>
                         <input
                             type="password"
+                            name="confirmPassword"
                             placeholder={t('confirmPassword')}
                             value={passwordForm.confirm}
                             onChange={e => setPasswordForm({ ...passwordForm, confirm: e.target.value })}
@@ -372,6 +377,7 @@ export const SettingsModal: React.FC<SettingsModalProps> = ({
                         onChange={e => setNewUser({ ...newUser, username: e.target.value })}
                         className="w-full px-3 py-2 text-sm border border-slate-300 rounded-md"
                         required
+                        autoComplete="username"
                     />
                     <input
                         type="password"

+ 1 - 3
web/components/VisionModelSelector.tsx

@@ -75,9 +75,7 @@ const VisionModelSelector: React.FC<VisionModelSelectorProps> = ({ isAdmin = fal
             disabled={loading || !isAdmin}
             className="w-full text-sm bg-slate-50 border border-slate-200 rounded-lg px-3 py-2 text-slate-700 focus:outline-none focus:border-blue-500 disabled:opacity-50 disabled:cursor-not-allowed"
           >
-            <option value="">
-              {loading ? t('loading') : visionModels.length === 0 ? t('noVisionModels') : `--- ${t('selectVisionModel')} ---`}
-            </option>
+            <option value="">--- {t('selectVisionModel')} ---</option>
             {visionModels.map(model => (
               <option key={model.id} value={model.id}>
                 {model.name} ({model.modelId})

+ 51 - 21
web/components/views/SettingsView.tsx

@@ -1,5 +1,5 @@
 import React, { useState, useEffect } from 'react';
-import { ModelConfig, ModelType, AppSettings, KnowledgeGroup, Tenant, TenantMember } from '../../types';
+import { ModelConfig, ModelType, AppSettings, KnowledgeGroup, Tenant, TenantMember, DEFAULT_SETTINGS } from '../../types';
 import { useLanguage } from '../../contexts/LanguageContext';
 import {
   ChevronLeft,
@@ -268,15 +268,16 @@ export const SettingsView: React.FC<SettingsViewProps> = ({
         setIsLoading(true);
         try {
             const data = await userSettingService.get(authToken);
-            if (data && Object.keys(data).length > 0) {
-                setKbSettings(data);
-                setLocalKbSettings(data);
-                if (data.selectedVisionId) {
-                    // Vision model ID is part of settings now
-                }
-            }
+            // If data is null, undefined, or empty object, use DEFAULT_SETTINGS
+            const finalSettings = (data && Object.keys(data).length > 0) ? { ...DEFAULT_SETTINGS, ...data } : DEFAULT_SETTINGS;
+            
+            setKbSettings(finalSettings);
+            setLocalKbSettings(finalSettings);
         } catch (error) {
             console.error(error);
+            // Fallback to defaults on error to prevent blank page
+            setKbSettings(DEFAULT_SETTINGS);
+            setLocalKbSettings(DEFAULT_SETTINGS);
         } finally {
             setIsLoading(false);
         }
@@ -563,7 +564,7 @@ export const SettingsView: React.FC<SettingsViewProps> = ({
     };
 
     const handleRemoveTenant = async (tenantId: string) => {
-        if (!(await confirm('Delete this organization?'))) return;
+        if (!(await confirm(t('confirmDeleteTenant')))) return;
         try {
             await apiClient.delete(`/v1/tenants/${tenantId}`);
             setSelectedTenantId(null);
@@ -623,7 +624,7 @@ export const SettingsView: React.FC<SettingsViewProps> = ({
 
         try {
             const result = await userService.importUsers(file);
-            showSuccess(t('importSuccess').replace('$1', (result.created + result.updated).toString()).replace('$2', result.errors.length.toString()));
+            showSuccess(t('importSuccess').replace('$1', (result.success || 0).toString()).replace('$2', (result.failed || 0).toString()));
             fetchUsers();
             if (result.errors.length > 0) {
                 console.warn('Import had errors:', result.errors);
@@ -669,7 +670,7 @@ export const SettingsView: React.FC<SettingsViewProps> = ({
     };
 
     const handleDeleteModel = async (id: string) => {
-        if (await confirm(t('confirmClear'))) {
+        if (await confirm(t('confirmDeleteModel'))) {
             await onUpdateModels('delete', { id } as ModelConfig);
         }
     };
@@ -741,6 +742,8 @@ export const SettingsView: React.FC<SettingsViewProps> = ({
             case ModelType.LLM: return t('typeLLM');
             case ModelType.EMBEDDING: return t('typeEmbedding');
             case ModelType.RERANK: return t('typeRerank');
+            case ModelType.VISION: return t('typeVision');
+            default: return type;
         }
     };
 
@@ -756,20 +759,23 @@ export const SettingsView: React.FC<SettingsViewProps> = ({
                     {t('changePassword')}
                 </h3>
                 <form onSubmit={handleChangePassword} className="space-y-4 max-w-sm">
+                    <input type="hidden" name="username" value="current-user" autoComplete="username" />
                     <div>
                         <input
                             type="password"
+                            name="currentPassword"
                             placeholder={t('currentPassword')}
                             value={passwordForm.current}
                             onChange={e => setPasswordForm({ ...passwordForm, current: e.target.value })}
                             className="w-full px-3 py-2 text-sm border border-slate-300 rounded-md focus:ring-2 focus:ring-blue-500 outline-none"
                             required
-                            autoComplete="new-password"
+                            autoComplete="current-password"
                         />
                     </div>
                     <div>
                         <input
                             type="password"
+                            name="newPassword"
                             placeholder={t('newPassword')}
                             value={passwordForm.new}
                             onChange={e => setPasswordForm({ ...passwordForm, new: e.target.value })}
@@ -781,6 +787,7 @@ export const SettingsView: React.FC<SettingsViewProps> = ({
                     <div>
                         <input
                             type="password"
+                            name="confirmPassword"
                             placeholder={t('confirmPassword')}
                             value={passwordForm.confirm}
                             onChange={e => setPasswordForm({ ...passwordForm, confirm: e.target.value })}
@@ -896,6 +903,7 @@ export const SettingsView: React.FC<SettingsViewProps> = ({
                             onChange={e => setNewUser({ ...newUser, username: e.target.value })}
                             className="w-full px-4 py-3 bg-slate-50 border border-slate-200 rounded-2xl text-sm font-medium focus:ring-4 focus:ring-indigo-500/10 focus:border-indigo-500/50 outline-none transition-all"
                             required
+                            autoComplete="username"
                         />
                         <input
                             type="text"
@@ -904,6 +912,7 @@ export const SettingsView: React.FC<SettingsViewProps> = ({
                             onChange={e => setNewUser({ ...newUser, displayName: e.target.value })}
                             className="w-full px-4 py-3 bg-slate-50 border border-slate-200 rounded-2xl text-sm font-medium focus:ring-4 focus:ring-indigo-500/10 focus:border-indigo-500/50 outline-none transition-all"
                             required
+                            autoComplete="name"
                         />
                         <input
                             type="password"
@@ -1507,7 +1516,7 @@ export const SettingsView: React.FC<SettingsViewProps> = ({
 
     const renderKnowledgeBaseTab = () => (
         <div className="space-y-8 animate-in slide-in-from-right duration-300 w-full max-w-5xl pb-10">
-            {localKbSettings && (
+            {localKbSettings ? (
                 <>
                     {/* Save/Cancel Bar */}
                     <div className="flex justify-end gap-3 sticky top-0 z-20 py-4 bg-white/50 backdrop-blur-sm border-b border-slate-100 mb-6">
@@ -1544,7 +1553,7 @@ export const SettingsView: React.FC<SettingsViewProps> = ({
                                     onChange={(e) => handleUpdateKbSettings('selectedLLMId', e.target.value)}
                                     className="w-full px-4 py-3.5 bg-slate-50 border border-slate-200 rounded-2xl text-sm font-medium outline-none focus:ring-4 focus:ring-indigo-500/10 transition-all cursor-pointer appearance-none"
                                 >
-                                    <option value="">{t('selectLLM')}</option>
+                                     <option value="">--- {t('selectLLMModel')} ---</option>
                                     {models.filter(m => m.type === ModelType.LLM).map(m => (
                                         <option key={m.id} value={m.id}>{m.name} ({m.modelId})</option>
                                     ))}
@@ -1558,7 +1567,7 @@ export const SettingsView: React.FC<SettingsViewProps> = ({
                                         onChange={(e) => handleUpdateKbSettings('selectedEmbeddingId', e.target.value)}
                                         className="w-full px-4 py-3.5 bg-slate-50 border border-slate-200 rounded-2xl text-sm font-medium outline-none focus:ring-4 focus:ring-indigo-500/10 transition-all"
                                     >
-                                        <option value="">{t('selectEmbedding')}</option>
+                                         <option value="">--- {t('selectEmbeddingModel')} ---</option>
                                         {models.filter(m => m.type === ModelType.EMBEDDING).map(m => (
                                             <option key={m.id} value={m.id}>{m.name}</option>
                                         ))}
@@ -1570,13 +1579,29 @@ export const SettingsView: React.FC<SettingsViewProps> = ({
                                         value={localKbSettings.selectedRerankId || ''}
                                         onChange={(e) => handleUpdateKbSettings('selectedRerankId', e.target.value)}
                                         className="w-full px-4 py-3.5 bg-slate-50 border border-slate-200 rounded-2xl text-sm font-medium outline-none focus:ring-4 focus:ring-indigo-500/10 transition-all"
-                                    >
-                                        <option value="">{t('none')}</option>
+                                     >
+                                        <option value="">--- {t('selectRerankModel')} ---</option>
                                         {models.filter(m => m.type === ModelType.RERANK).map(m => (
                                             <option key={m.id} value={m.id}>{m.name}</option>
                                         ))}
                                     </select>
                                 </div>
+                                <div>
+                                    <label className="block text-[10px] font-black text-slate-400 uppercase tracking-widest mb-2 px-1">
+                                        {t('defaultVisionModel')}
+                                        <span className="ml-1 text-[8px] opacity-60">({t('typeVision')})</span>
+                                    </label>
+                                    <select
+                                        value={localKbSettings.selectedVisionId || ''}
+                                        onChange={(e) => handleUpdateKbSettings('selectedVisionId', e.target.value)}
+                                        className="w-full px-4 py-3.5 bg-slate-50 border border-slate-200 rounded-2xl text-sm font-medium outline-none focus:ring-4 focus:ring-indigo-500/10 transition-all"
+                                     >
+                                        <option value="">--- {t('selectVisionModel')} ---</option>
+                                        {models.filter(m => m.type === ModelType.VISION || m.supportsVision).map(m => (
+                                            <option key={m.id} value={m.id}>{m.name}</option>
+                                        ))}
+                                    </select>
+                                </div>
                             </div>
                         </div>
                     </section>
@@ -1621,10 +1646,10 @@ export const SettingsView: React.FC<SettingsViewProps> = ({
                                 />
                             </div>
                         </div>
-                    </section >
+                    </section>
 
                     {/* Chat Hyperparameters */}
-                    < section className="bg-white/80 backdrop-blur-md p-8 rounded-3xl border border-slate-200/50 shadow-sm space-y-6" >
+                    <section className="bg-white/80 backdrop-blur-md p-8 rounded-3xl border border-slate-200/50 shadow-sm space-y-6">
                         <div className="flex items-center gap-3 text-slate-900 font-black uppercase tracking-widest text-[11px] border-b border-slate-100 pb-4">
                             <div className="w-8 h-8 rounded-xl bg-pink-50 flex items-center justify-center text-pink-600">
                                 <Sparkles size={16} />
@@ -1661,10 +1686,10 @@ export const SettingsView: React.FC<SettingsViewProps> = ({
                                 />
                             </div>
                         </div>
-                    </section >
+                    </section>
 
                     {/* Retrieval & Search Settings */}
-                    < section className="bg-white/80 backdrop-blur-md p-8 rounded-3xl border border-slate-200/50 shadow-sm space-y-6" >
+                    <section className="bg-white/80 backdrop-blur-md p-8 rounded-3xl border border-slate-200/50 shadow-sm space-y-6">
                         <div className="flex items-center gap-3 text-slate-900 font-black uppercase tracking-widest text-[11px] border-b border-slate-100 pb-4">
                             <div className="w-8 h-8 rounded-xl bg-emerald-50 flex items-center justify-center text-emerald-600">
                                 <Database size={16} />
@@ -1815,6 +1840,11 @@ export const SettingsView: React.FC<SettingsViewProps> = ({
                         </div>
                     </section>
                 </>
+            ) : (
+                <div className="flex flex-col items-center justify-center py-20 space-y-4">
+                    <Loader2 size={40} className="animate-spin text-indigo-600 opacity-20" />
+                    <p className="text-sm font-medium text-slate-400 animate-pulse">{t('loading')}</p>
+                </div>
             )}
         </div>
     );

+ 1 - 0
web/types.ts

@@ -282,6 +282,7 @@ export const DEFAULT_SETTINGS: AppSettings = {
   selectedLLMId: '',
   selectedEmbeddingId: '',
   selectedRerankId: '',
+  selectedVisionId: '',
 
   temperature: 0.3,
   maxTokens: 8192,

+ 15 - 6
web/utils/translations.ts

@@ -45,7 +45,7 @@ export const translations = {
     ragSettings: "RAG 设置",
     enableRerank: "启用重排序 (Rerank)",
     enableRerankDesc: "使用重排序模型对检索结果进行二次精排,提高准确性",
-    selectRerankModel: "选择 Rerank 模型",
+    selectRerankModel: "选择Rerank模型",
     selectModelPlaceholder: "请选择模型...",
 
     // Config Panel
@@ -151,7 +151,8 @@ export const translations = {
     userPromotedToAdmin: "用户已提升为管理员",
     userDemotedFromAdmin: "用户已降级为普通用户",
     updateUserFailed: "更新用户失败",
-    confirmDeleteUser: "Confirm要删除此用户吗?",
+    confirmDeleteUser: "确定要删除此用户吗?",
+    confirmDeleteTenant: "确定要删除此组织吗?",
     deleteUser: "删除用户",
     deleteUserFailed: "删除用户失败",
     editUser: "编辑用户",
@@ -178,6 +179,7 @@ export const translations = {
     loadVisionModelFailed: "加载视觉模型失败",
     loadFailed: "加载失败,请检查网络连接",
     saveVisionModelFailed: "保存视觉模型失败",
+    defaultSettingFailed: "默认设置保存失败",
     noVisionModels: "没有可用的视觉模型",
     selectVisionModel: "请选择视觉模型",
     visionModelHelp: "用于处理图片文件的视觉模型。如果没有可用模型,请在模型管理中添加并勾选“支持视觉”选项。",
@@ -200,7 +202,8 @@ export const translations = {
     errorLabel: "错误",
     errorNoModel: "未选择推理模型或配置无效。",
     aiDisclaimer: "AI 可能会犯错。请核实源文件中的重要信息。",
-    confirmClear: "Confirm要清空所有文件及索引吗?",
+    confirmClear: "确定要清空所有文件及索引吗?",
+    confirmDeleteModel: "确定要删除此模型吗?",
     removeFile: "移除文件",
     apiError: "缺少配置或 API 密钥无效。",
     geminiError: "API 请求失败。",
@@ -913,7 +916,7 @@ export const translations = {
     ragSettings: "RAG Settings",
     enableRerank: "Enable Rerank",
     enableRerankDesc: "Use a rerank model to re-rank retrieval results for better accuracy",
-    selectRerankModel: "Select Rerank Model",
+    selectRerankModel: "Please select Rerank model",
     selectModelPlaceholder: "Select a model...",
 
     headerModelSelection: "Model Selection",
@@ -1018,6 +1021,7 @@ export const translations = {
     userDemotedFromAdmin: "User demoted from administrator",
     updateUserFailed: "Failed to update user",
     confirmDeleteUser: "Are you sure you want to delete this user?",
+    confirmDeleteTenant: "Are you sure you want to delete this organization?",
     deleteUser: "Delete User",
     deleteUserFailed: "Failed to delete user",
     userDeletedSuccessfully: "User deleted successfully",
@@ -1070,6 +1074,7 @@ export const translations = {
     loadVisionModelFailed: "Failed to load vision models",
     loadFailed: "Loading failed, please check network connection",
     saveVisionModelFailed: "Failed to save vision model",
+    defaultSettingFailed: "Failed to save default settings",
     noVisionModels: "No vision models available",
     selectVisionModel: "Please select vision model",
     visionModelHelp: "Vision model for processing image files. If no models are available, please add one in model management and check the 'Supports Vision' option.",
@@ -1092,6 +1097,7 @@ export const translations = {
     errorNoModel: "No inference model selected or config invalid.",
     aiDisclaimer: "AI can make mistakes. Verify important info.",
     confirmClear: "Delete all files and indices?",
+    confirmDeleteModel: "Are you sure you want to delete this model?",
     removeFile: "Remove file",
     apiError: "Missing config or invalid API Key.",
     geminiError: "API Request Failed.",
@@ -1704,8 +1710,8 @@ export const translations = {
     // RAG Settings
     ragSettings: "RAG 設定",
     enableRerank: "リランクを有効にする",
-    enableRerankDesc: "リランクモデルを使用してSearch resultsを再ランク付けし、精度を向上させます",
-    selectRerankModel: "リランクモデルの選択",
+    enableRerankDesc: "リランクモデルを使用して検索結果を再ランク付けし、精度を向上させます",
+    selectRerankModel: "リランクモデルを選択してください",
     selectModelPlaceholder: "モデルを選択...",
 
     headerModelSelection: "モデル選択",
@@ -1812,6 +1818,7 @@ export const translations = {
     userDemotedFromAdmin: "ユーザーを一般ユーザーに降格しました",
     updateUserFailed: "ユーザー情報の更新に失敗しました",
     confirmDeleteUser: "このユーザーを削除してもよろしいですか?",
+    confirmDeleteTenant: "この組織を削除してもよろしいですか?",
     deleteUser: "ユーザー削除",
     deleteUserFailed: "ユーザーの削除に失敗しました",
     userDeletedSuccessfully: "ユーザーを削除しました",
@@ -1864,6 +1871,7 @@ export const translations = {
     loadVisionModelFailed: "ビジョンモデルの読み込みに失敗しました",
     loadFailed: "読み込みに失敗しました、ネットワーク接続を確認してください",
     saveVisionModelFailed: "ビジョンモデルの保存に失敗しました",
+    defaultSettingFailed: "デフォルト設定の保存に失敗しました",
     noVisionModels: "利用可能なビジョンモデルがありません",
     selectVisionModel: "ビジョンモデルを選択してください",
     visionModelHelp: "画像ファイルを処理するためのビジョンモデル。利用可能なモデルがない場合は、モデル管理で追加し、「ビジョン対応」オプションをチェックしてください。",
@@ -1886,6 +1894,7 @@ export const translations = {
     errorNoModel: "モデルが選択されていないか、設定が無効です。",
     aiDisclaimer: "AIは間違いを犯す可能性があります。",
     confirmClear: "すべてのファイルを削除しますか?",
+    confirmDeleteModel: "このモデルを削除してもよろしいですか?",
     removeFile: "ファイルを削除",
     apiError: "設定が不足しているか、APIキーが有効ではありません。",
     geminiError: "APIリクエストに失敗しました。",