AI Agent:Agents:llms:OllamaLlm.py:Source Code

from Agents.llms.LlmBase import LlmBase # Import the new base class
# Per the LangChainDeprecationWarning, import from the new package

from langchain_ollama import ChatOllama

from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import HumanMessage, AIMessage, BaseMessage, SystemMessage, AIMessageChunk
from typing import List, Optional, Any, Dict  # Dict added
from tools.exception import InterruptedException  # or import from a shared exception module
import requests
import json


class OllamaLLM(LlmBase):  # Inherit from LlmBase
    def __init__(self, model_identifier: str = "llama2", temperature: float = 0, **kwargs):
        super().__init__(model_identifier, temperature, **kwargs)  # Call base class constructor

        # Specific OllamaLLM initialization.
        # self.llm is already initialized in LlmBase.__init__ via _initialize_llm().
        # get_ollama_model_capabilities() returns None on failure, so fall back to an empty list.
        capabilities = self.get_ollama_model_capabilities(model_identifier) or []
        print("capabilities:", capabilities)

        # Simple capability detection from the "capabilities" list reported by the
        # Ollama /api/show endpoint (more accurate detection may be needed).
        self._supports_images = "vision" in capabilities
        if self._supports_images:
            print(f"OllamaLLM: model '{self.model_name}' may support images.")
        self._supports_tools = "tools" in capabilities
        self._supports_thinking = "thinking" in capabilities

    def _initialize_llm(self) -> BaseChatModel:
        try:
            self.llm = ChatOllama(
                model=self.model_name,
                temperature=self.temperature,
                **self.llm_kwargs  # Use self.llm_kwargs, not the constructor's kwargs
            )
            return self.llm
        except Exception as e:
            print(f"OllamaLLM初期化エラー: {e}. モデル名: {self.model_name}, kwargs: {kwargs}")
            raise
        
    @property
    def supports_images(self) -> bool:
        return self._supports_images

    @property
    def supports_tools(self) -> bool:
        return self._supports_tools
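
    # Assumption: LlmBase does not already define this accessor. _supports_thinking
    # is set in __init__ but was never surfaced; this mirrors the other capability
    # properties above.
    @property
    def supports_thinking(self) -> bool:
        return self._supports_thinking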


    def get_ollama_model_capabilities(self, model_name: str):
        """
        Return the capability list for an Ollama model (e.g. whether it can
        use images or tools), or None if the lookup fails.
        """
        try:
            response = requests.post(
                "http://localhost:11434/api/show",
                json={"model": model_name},
                timeout=10,
            )
            if response.status_code == 200:
                # Explicitly convert a JSON string to a dict if needed
                raw = response.json()
                data = json.loads(raw) if isinstance(raw, str) else raw

                # /api/show reports capabilities as a list of strings,
                # e.g. ["completion", "vision", "tools"]
                capabilities = data.get("capabilities", [])
                return capabilities
            print("response.status_code:", response.status_code)
            return None
        except Exception as e:
            print(f"Failed to fetch model info: {e}")
            return None

    def get_ollama_models(self):
        """Return the names of the models available on the local Ollama server."""
        try:
            response = requests.get("http://localhost:11434/api/tags", timeout=10)
            if response.status_code == 200:
                models = response.json().get("models", [])
                print("models:", models)
                return [model["name"] for model in models]
            else:
                print(f"Error: {response.status_code}")
                return []
        except Exception as e:
            print(f"API call failed: {e}")
            return []