from typing import Any

import requests
from langchain_anthropic import ChatAnthropic
from langchain_ibm import ChatWatsonx
from langchain_ollama import ChatOllama
from langchain_openai import ChatOpenAI
from pydantic.v1 import SecretStr

from lfx.base.models.anthropic_constants import ANTHROPIC_MODELS
from lfx.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS
from lfx.base.models.google_generative_ai_model import ChatGoogleGenerativeAIFixed
from lfx.base.models.model import LCModelComponent
from lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url
from lfx.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES
from lfx.field_typing import LanguageModel
from lfx.field_typing.range_spec import RangeSpec
from lfx.inputs.inputs import BoolInput, MessageTextInput, StrInput
from lfx.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput
from lfx.log.logger import logger
from lfx.schema.dotdict import dotdict
from lfx.utils.util import transform_localhost_url

# IBM watsonx.ai constants
IBM_WATSONX_DEFAULT_MODELS = [
    "ibm/granite-3-2b-instruct",
    "ibm/granite-3-8b-instruct",
    "ibm/granite-13b-instruct-v2",
]
IBM_WATSONX_URLS = [
    "https://us-south.ml.cloud.ibm.com",
    "https://eu-de.ml.cloud.ibm.com",
    "https://eu-gb.ml.cloud.ibm.com",
    "https://au-syd.ml.cloud.ibm.com",
    "https://jp-tok.ml.cloud.ibm.com",
    "https://ca-tor.ml.cloud.ibm.com",
]

# Ollama API constants
HTTP_STATUS_OK = 200
JSON_MODELS_KEY = "models"
JSON_NAME_KEY = "name"
JSON_CAPABILITIES_KEY = "capabilities"
DESIRED_CAPABILITY = "completion"
DEFAULT_OLLAMA_URL = "http://localhost:11434"


class LanguageModelComponent(LCModelComponent):
    """Langflow component that builds a chat model for a user-selected provider.

    Supports OpenAI, Anthropic, Google, IBM watsonx.ai, Ollama, and any
    OpenAI-compatible endpoint. Provider choice drives which inputs are shown
    (see `update_build_config`) and which LangChain chat class is instantiated
    (see `build_model`).
    """

    display_name = "Language Model"
    description = "Runs a language model given a specified provider."
    documentation: str = "https://docs.langflow.org/components-models"
    icon = "brain-circuit"
    category = "models"
    priority = 0  # Set priority to 0 to make it appear first

    @staticmethod
    def fetch_ibm_models(base_url: str) -> list[str]:
        """Fetch available models from the watsonx.ai API.

        Queries the foundation-model specs endpoint, filtered to chat-capable,
        non-withdrawn models. Falls back to `IBM_WATSONX_DEFAULT_MODELS` on any
        failure (network error, bad status, unexpected payload) so the UI always
        has options to show.
        """
        try:
            endpoint = f"{base_url}/ml/v1/foundation_model_specs"
            params = {"version": "2024-09-16", "filters": "function_text_chat,!lifecycle_withdrawn"}
            response = requests.get(endpoint, params=params, timeout=10)
            response.raise_for_status()
            data = response.json()
            models = [model["model_id"] for model in data.get("resources", [])]
            return sorted(models)
        except Exception:  # noqa: BLE001 -- best-effort fetch; any failure falls back to defaults
            logger.exception("Error fetching IBM watsonx models. Using default models.")
            return IBM_WATSONX_DEFAULT_MODELS

    inputs = [
        DropdownInput(
            name="provider",
            display_name="Model Provider",
            options=["OpenAI", "Anthropic", "Google", "IBM watsonx.ai", "Ollama", "OpenAI Compatible"],
            value="OpenAI",
            info="Select the model provider",
            real_time_refresh=True,
            options_metadata=[
                {"icon": "OpenAI"},
                {"icon": "Anthropic"},
                {"icon": "GoogleGenerativeAI"},
                {"icon": "WatsonxAI"},
                {"icon": "Ollama"},
                {"icon": "OpenAI"},
            ],
        ),
        DropdownInput(
            name="model_name",
            display_name="Model Name",
            options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,
            value=OPENAI_CHAT_MODEL_NAMES[0],
            info="Select the model to use",
            real_time_refresh=True,
            refresh_button=True,
        ),
        SecretStrInput(
            name="api_key",
            display_name="OpenAI API Key",
            info="Model Provider API key",
            required=False,
            show=True,
            real_time_refresh=True,
        ),
        DropdownInput(
            name="base_url_ibm_watsonx",
            display_name="watsonx API Endpoint",
            info="The base URL of the API (IBM watsonx.ai only)",
            options=IBM_WATSONX_URLS,
            value=IBM_WATSONX_URLS[0],
            show=False,
            real_time_refresh=True,
        ),
        StrInput(
            name="base_url",
            display_name="API Base URL",
            value="https://api.openai.com/v1",
            show=False,
            required=False,
        ),
        StrInput(
            name="project_id",
            display_name="watsonx Project ID",
            info="The project ID associated with the foundation model (IBM watsonx.ai only)",
            show=False,
            required=False,
        ),
        StrInput(
            name="custom_model",
            display_name="Custom Model Name",
            value="gpt-4o-mini",
            show=False,
            required=False,
        ),
        MessageTextInput(
            name="ollama_base_url",
            display_name="Ollama API URL",
            info=f"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}",
            value=DEFAULT_OLLAMA_URL,
            show=False,
            real_time_refresh=True,
            load_from_db=True,
        ),
        MessageInput(
            name="input_value",
            display_name="Input",
            info="The input text to send to the model",
        ),
        MultilineInput(
            name="system_message",
            display_name="System Message",
            info="A system message that helps set the behavior of the assistant",
            advanced=False,
        ),
        BoolInput(
            name="stream",
            display_name="Stream",
            info="Whether to stream the response",
            value=False,
            advanced=True,
        ),
        SliderInput(
            name="temperature",
            display_name="Temperature",
            value=0.1,
            info="Controls randomness of responses",
            range_spec=RangeSpec(min=0, max=1, step=0.01),
            advanced=True,
        ),
    ]

    def build_model(self) -> LanguageModel:
        """Instantiate the LangChain chat model for the selected provider.

        Raises:
            ValueError: when a provider-required credential/field is missing, or
                the provider value is unknown.
        """
        provider = self.provider
        model_name = self.model_name
        temperature = self.temperature
        stream = self.stream
        custom_model = self.custom_model  # used only by the "OpenAI Compatible" provider

        if provider == "OpenAI":
            if not self.api_key:
                msg = "OpenAI API key is required when using OpenAI provider"
                raise ValueError(msg)
            if model_name in OPENAI_REASONING_MODEL_NAMES:
                # Reasoning models (o-series) reject a temperature parameter.
                temperature = None
            return ChatOpenAI(
                model_name=model_name,
                temperature=temperature,
                streaming=stream,
                openai_api_key=self.api_key,
            )
        if provider == "Anthropic":
            if not self.api_key:
                msg = "Anthropic API key is required when using Anthropic provider"
                raise ValueError(msg)
            return ChatAnthropic(
                model=model_name,
                temperature=temperature,
                streaming=stream,
                anthropic_api_key=self.api_key,
            )
        if provider == "Google":
            if not self.api_key:
                msg = "Google API key is required when using Google provider"
                raise ValueError(msg)
            return ChatGoogleGenerativeAIFixed(
                model=model_name,
                temperature=temperature,
                streaming=stream,
                google_api_key=self.api_key,
            )
        if provider == "IBM watsonx.ai":
            if not self.api_key:
                msg = "IBM API key is required when using IBM watsonx.ai provider"
                raise ValueError(msg)
            if not self.base_url_ibm_watsonx:
                msg = "IBM watsonx API Endpoint is required when using IBM watsonx.ai provider"
                raise ValueError(msg)
            if not self.project_id:
                msg = "IBM watsonx Project ID is required when using IBM watsonx.ai provider"
                raise ValueError(msg)
            return ChatWatsonx(
                apikey=SecretStr(self.api_key).get_secret_value(),
                url=self.base_url_ibm_watsonx,
                project_id=self.project_id,
                model_id=model_name,
                params={"temperature": temperature},
                streaming=stream,
            )
        if provider == "Ollama":
            if not self.ollama_base_url:
                msg = "Ollama API URL is required when using Ollama provider"
                raise ValueError(msg)
            if not model_name:
                msg = "Model name is required when using Ollama provider"
                raise ValueError(msg)
            transformed_base_url = transform_localhost_url(self.ollama_base_url)
            if transformed_base_url and transformed_base_url.rstrip("/").endswith("/v1"):
                # /v1 is the OpenAI-compatible endpoint; ChatOllama talks to the
                # native API, so strip the suffix rather than fail at request time.
                transformed_base_url = transformed_base_url.rstrip("/").removesuffix("/v1")
                logger.warning(
                    "Detected /v1 suffix in base URL. The Ollama component uses the native Ollama API, "
                    "not the OpenAI-compatible API. The /v1 suffix has been automatically removed."
                )
            return ChatOllama(
                base_url=transformed_base_url,
                model=model_name,
                temperature=temperature,
            )
        if provider == "OpenAI Compatible":
            return ChatOpenAI(
                model_name=custom_model,  # user-supplied model name for the compatible endpoint
                temperature=temperature,
                streaming=stream,
                # Some OpenAI-compatible servers require a non-empty key even
                # when they don't validate it.
                openai_api_key=self.api_key or "dummy",
                openai_api_base=self.base_url,
            )
        msg = f"Unknown provider: {provider}"
        raise ValueError(msg)

    async def update_build_config(
        self, build_config: dotdict, field_value: Any, field_name: str | None = None
    ) -> dotdict:
        """Show/hide inputs and refresh model options when a field changes.

        Handles four triggers: provider switch (toggles provider-specific
        fields and repopulates the model list), watsonx endpoint change
        (re-fetches IBM models), Ollama URL change (re-fetches Ollama models),
        and model_name refresh (re-fetches Ollama models; hides the system
        message for OpenAI o1 models, which do not support it).
        """
        if field_name == "provider":
            if field_value == "OpenAI":
                build_config["model_name"]["show"] = True
                build_config["custom_model"]["show"] = False
                build_config["model_name"]["options"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES
                build_config["model_name"]["value"] = OPENAI_CHAT_MODEL_NAMES[0]
                build_config["api_key"]["display_name"] = "OpenAI API Key"
                build_config["api_key"]["show"] = True
                build_config["base_url"]["show"] = False
                build_config["base_url_ibm_watsonx"]["show"] = False
                build_config["project_id"]["show"] = False
                build_config["ollama_base_url"]["show"] = False
            elif field_value == "Anthropic":
                build_config["model_name"]["show"] = True
                build_config["custom_model"]["show"] = False
                build_config["model_name"]["options"] = ANTHROPIC_MODELS
                build_config["model_name"]["value"] = ANTHROPIC_MODELS[0]
                build_config["api_key"]["display_name"] = "Anthropic API Key"
                build_config["api_key"]["show"] = True
                build_config["base_url"]["show"] = False
                build_config["base_url_ibm_watsonx"]["show"] = False
                build_config["project_id"]["show"] = False
                build_config["ollama_base_url"]["show"] = False
            elif field_value == "Google":
                build_config["model_name"]["show"] = True
                build_config["custom_model"]["show"] = False
                build_config["model_name"]["options"] = GOOGLE_GENERATIVE_AI_MODELS
                build_config["model_name"]["value"] = GOOGLE_GENERATIVE_AI_MODELS[0]
                build_config["api_key"]["display_name"] = "Google API Key"
                build_config["api_key"]["show"] = True
                build_config["base_url"]["show"] = False
                build_config["base_url_ibm_watsonx"]["show"] = False
                build_config["project_id"]["show"] = False
                build_config["ollama_base_url"]["show"] = False
            elif field_value == "IBM watsonx.ai":
                build_config["model_name"]["show"] = True
                build_config["custom_model"]["show"] = False
                build_config["model_name"]["options"] = IBM_WATSONX_DEFAULT_MODELS
                build_config["model_name"]["value"] = IBM_WATSONX_DEFAULT_MODELS[0]
                build_config["api_key"]["display_name"] = "IBM API Key"
                build_config["api_key"]["show"] = True
                build_config["base_url"]["show"] = False
                build_config["base_url_ibm_watsonx"]["show"] = True
                build_config["project_id"]["show"] = True
                build_config["ollama_base_url"]["show"] = False
            elif field_value == "Ollama":
                build_config["model_name"]["show"] = True
                build_config["custom_model"]["show"] = False
                build_config["api_key"]["show"] = False
                build_config["base_url"]["show"] = False
                build_config["base_url_ibm_watsonx"]["show"] = False
                build_config["project_id"]["show"] = False
                build_config["ollama_base_url"]["show"] = True

                ollama_url = getattr(self, "ollama_base_url", None)
                if not ollama_url:
                    config_value = build_config["ollama_base_url"].get("value", DEFAULT_OLLAMA_URL)
                    # An ALL_CAPS value with underscores looks like an unresolved
                    # global-variable reference, not a usable URL.
                    is_variable_ref = (
                        config_value
                        and isinstance(config_value, str)
                        and config_value.isupper()
                        and "_" in config_value
                    )
                    if is_variable_ref:
                        await logger.adebug(
                            f"Config value appears to be a variable reference: {config_value}, using default"
                        )
                        ollama_url = DEFAULT_OLLAMA_URL
                    else:
                        ollama_url = config_value

                await logger.adebug(f"Fetching Ollama models for provider switch. URL: {ollama_url}")
                if await is_valid_ollama_url(url=ollama_url):
                    try:
                        models = await get_ollama_models(
                            base_url_value=ollama_url,
                            desired_capability=DESIRED_CAPABILITY,
                            json_models_key=JSON_MODELS_KEY,
                            json_name_key=JSON_NAME_KEY,
                            json_capabilities_key=JSON_CAPABILITIES_KEY,
                        )
                        build_config["model_name"]["options"] = models
                        build_config["model_name"]["value"] = models[0] if models else ""
                    except ValueError:
                        await logger.awarning("Failed to fetch Ollama models. Setting empty options.")
                        build_config["model_name"]["options"] = []
                        build_config["model_name"]["value"] = ""
                else:
                    await logger.awarning(f"Invalid Ollama URL: {ollama_url}")
                    build_config["model_name"]["options"] = []
                    build_config["model_name"]["value"] = ""
            elif field_value == "OpenAI Compatible":
                build_config["model_name"]["show"] = False  # hide the model dropdown
                build_config["custom_model"]["show"] = True  # show the free-form model input
                build_config["api_key"]["display_name"] = "API Key"
                build_config["api_key"]["show"] = True
                build_config["base_url"]["show"] = True
                build_config["base_url_ibm_watsonx"]["show"] = False
                build_config["project_id"]["show"] = False
                build_config["ollama_base_url"]["show"] = False
        elif (
            field_name == "base_url_ibm_watsonx"
            and field_value
            and hasattr(self, "provider")
            and self.provider == "IBM watsonx.ai"
        ):
            try:
                models = self.fetch_ibm_models(base_url=field_value)
                build_config["model_name"]["options"] = models
                build_config["model_name"]["value"] = models[0] if models else IBM_WATSONX_DEFAULT_MODELS[0]
            except Exception:  # noqa: BLE001 -- UI refresh must never propagate
                logger.exception("Error updating IBM model options.")
        elif field_name == "ollama_base_url":
            await logger.adebug(f"Fetching Ollama models from updated URL: {self.ollama_base_url}")
            if await is_valid_ollama_url(url=self.ollama_base_url):
                try:
                    models = await get_ollama_models(
                        base_url_value=self.ollama_base_url,
                        desired_capability=DESIRED_CAPABILITY,
                        json_models_key=JSON_MODELS_KEY,
                        json_name_key=JSON_NAME_KEY,
                        json_capabilities_key=JSON_CAPABILITIES_KEY,
                    )
                    build_config["model_name"]["options"] = models
                    build_config["model_name"]["value"] = models[0] if models else ""
                except ValueError:
                    await logger.awarning("Error updating Ollama model options.")
                    build_config["model_name"]["options"] = []
                    build_config["model_name"]["value"] = ""
            else:
                await logger.awarning(f"Invalid Ollama URL: {self.ollama_base_url}")
                build_config["model_name"]["options"] = []
                build_config["model_name"]["value"] = ""
        elif field_name == "model_name":
            if hasattr(self, "provider") and self.provider == "Ollama":
                ollama_url = getattr(self, "ollama_base_url", DEFAULT_OLLAMA_URL)
                if await is_valid_ollama_url(url=ollama_url):
                    try:
                        models = await get_ollama_models(
                            base_url_value=ollama_url,
                            desired_capability=DESIRED_CAPABILITY,
                            json_models_key=JSON_MODELS_KEY,
                            json_name_key=JSON_NAME_KEY,
                            json_capabilities_key=JSON_CAPABILITIES_KEY,
                        )
                        build_config["model_name"]["options"] = models
                    except ValueError:
                        await logger.awarning("Failed to refresh Ollama models.")
                        build_config["model_name"]["options"] = []
                else:
                    build_config["model_name"]["options"] = []
            # o1 models reject a system message, so hide that input for them.
            if field_value and field_value.startswith("o1") and hasattr(self, "provider") and self.provider == "OpenAI":
                if "system_message" in build_config:
                    build_config["system_message"]["show"] = False
            elif "system_message" in build_config:
                build_config["system_message"]["show"] = True
        return build_config