AI Agent:Agents:llms:GeminiLlm.py: Source Code


from Agents.llms.LlmBase import LlmBase # Import the new base class
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_google_genai.chat_models import ChatGoogleGenerativeAIError
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import HumanMessage, AIMessage, BaseMessage, SystemMessage, AIMessageChunk
from typing import List, Optional, Any
from tools.exception import InterruptedException

class GeminiLLM(LlmBase): # Inherit from LlmBase
    def __init__(self, model_identifier: str = "gemini-2.5-flash", temperature: float = 0.0, **kwargs):
        super().__init__(model_identifier, temperature, **kwargs)  # Call base class constructor
        
        # Specific GeminiLLM initialization
        # self.llm is already initialized in LlmBase.__init__ via _initialize_llm()

        # Simple capability flags (more accurate per-model detection is needed)
        self._supports_tools = True
        self._supports_images = True
        #if "vision" in self.model_name or "gemini-2.5-pro" in self.model_name:
        #    self._supports_images = True
        #    print(f"GeminiLLM: model '{self.model_name}' may support images.")
        #else:
        #    self._supports_images = False

    def _initialize_llm(self) -> BaseChatModel:
        try:
            self.llm = ChatGoogleGenerativeAI(
                model=self.model_name,
                temperature=self.temperature,
                streaming=True,
                **self.llm_kwargs  # Use self.llm_kwargs, not the raw kwargs
            )
            return self.llm
        except Exception as e:
            print(f"GeminiLLM初期化エラー: {e}. モデル名: {self.model_name}")
            # フォールバックやデフォルトモデルでの再試行などを検討
            raise


    @property
    def supports_images(self) -> bool:
        return self._supports_images


    @property
    def supports_tools(self) -> bool:
        return self._supports_tools
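

# A minimal usage sketch, not part of the original module. Assumptions: a valid
# GOOGLE_API_KEY is set in the environment, and LlmBase stores the initialized
# chat model on `self.llm` (as the comments in __init__ suggest). `invoke` and
# `stream` are the standard LangChain BaseChatModel methods on the wrapped
# ChatGoogleGenerativeAI instance.
if __name__ == "__main__":
    llm = GeminiLLM(model_identifier="gemini-2.5-flash", temperature=0.2)
    print(f"supports_images={llm.supports_images}, supports_tools={llm.supports_tools}")

    # One-shot call
    reply = llm.llm.invoke([HumanMessage(content="Say hello in one short sentence.")])
    print(reply.content)

    # Streaming call (the model is constructed with streaming=True above)
    for chunk in llm.llm.stream([HumanMessage(content="Count to three.")]):
        print(chunk.content, end="", flush=True)
    print()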