from Agents.llms.LlmBase import LlmBase  # Import the new base class
# Per the LangChainDeprecationWarning, import from the new packages
from langchain_openai import ChatOpenAI
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import HumanMessage, AIMessage, BaseMessage, SystemMessage, AIMessageChunk
from typing import List, Optional, Any, Dict  # Dict added
from tools.exception import InterruptedException  # or from a shared exception module
import requests
import json
import lmstudio as lms
class LMStudioLLM(LlmBase):  # Inherit from LlmBase
    def __init__(self, model_identifier: str = "llama2", temperature: float = 0, **kwargs):
        super().__init__(model_identifier, temperature, **kwargs)  # Call the base class constructor
        # LMStudioLLM-specific initialization.
        # self.llm is already initialized in LlmBase.__init__ via _initialize_llm()
        model = self.get_lmstudio_model_states(self.model_name)
        # Rough capability detection from the downloaded-model metadata
        # (a more accurate check may be needed)
        self._supports_images = getattr(model, "vision", False)
        self._supports_tools = getattr(model, "trained_for_tool_use", False)
        print("model:", model)
        self._supports_thinking = False
    def _initialize_llm(self) -> BaseChatModel:
        try:
            self.llm = ChatOpenAI(
                base_url="http://localhost:1234/v1",
                api_key="not-needed",  # A local LLM does not require an API key
                model=self.model_name,
                temperature=self.temperature,
                streaming=True,
                **self.llm_kwargs  # Use self.llm_kwargs here, not kwargs
            )
            return self.llm
        except Exception as e:
            print(f"LM Studio initialization error: {e}. Model name: {self.model_name}, kwargs: {self.llm_kwargs}")
            raise
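    def list_server_models(self) -> List[Dict[str, Any]]:
        # Illustrative sketch (not part of the original class): LM Studio's
        # local server exposes an OpenAI-compatible REST API, so the models it
        # serves can also be listed over plain HTTP. This assumes the same
        # default server address used in _initialize_llm.
        response = requests.get("http://localhost:1234/v1/models", timeout=5)
        response.raise_for_status()
        # OpenAI-compatible response shape: {"object": "list", "data": [...]}
        return json.loads(response.text).get("data", [])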
    @property
    def supports_images(self) -> bool:
        return self._supports_images
    @property
    def supports_tools(self) -> bool:
        return self._supports_tools
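    @property
    def supports_thinking(self) -> bool:
        # Assumed accessor, added for symmetry with supports_images and
        # supports_tools; _supports_thinking is set in __init__ but was not
        # exposed. Skip this if LlmBase already defines the property.
        return self._supports_thinking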
    def get_lmstudio_model_states(self, model_name: str):
        """
        Return the status data of the downloaded LM Studio model matching
        model_name, or None if no such model is found.
        """
        for model in lms.list_downloaded_models():
            if model.model_key == model_name:
                # Note: this reads the SDK wrapper's private "_data" attribute,
                # which may change between lmstudio-python versions.
                return model.__dict__["_data"]
        return None
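
# Minimal usage sketch (assumes an LM Studio server is running on
# localhost:1234 and that a model with the given key has been downloaded):
if __name__ == "__main__":
    llm_wrapper = LMStudioLLM(model_identifier="llama2", temperature=0)
    print("supports images:", llm_wrapper.supports_images)
    print("supports tools:", llm_wrapper.supports_tools)
    # self.llm is a LangChain ChatOpenAI instance, so it accepts a list of
    # LangChain messages via invoke().
    reply = llm_wrapper.llm.invoke([HumanMessage(content="Hello!")])
    print(reply.content)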