Mirror of https://github.com/langgenius/dify.git (synced 2026-02-18 23:14:06 +00:00)

Compare commits: 0.3.31-fix...0.3.32 (6 commits)
Commits (SHA1):

3cc697832a
bb98f5756a
e1d2203371
93467cb363
ea526d0822
0e627c920f

@@ -91,7 +91,7 @@ class Config:
         # ------------------------
         # General Configurations.
         # ------------------------
-        self.CURRENT_VERSION = "0.3.31"
+        self.CURRENT_VERSION = "0.3.32"
         self.COMMIT_SHA = get_env('COMMIT_SHA')
         self.EDITION = "SELF_HOSTED"
         self.DEPLOY_ENV = get_env('DEPLOY_ENV')

@@ -115,7 +115,7 @@ class ModelProviderModelValidateApi(Resource):
         parser = reqparse.RequestParser()
         parser.add_argument('model_name', type=str, required=True, nullable=False, location='json')
         parser.add_argument('model_type', type=str, required=True, nullable=False,
-                            choices=['text-generation', 'embeddings', 'speech2text'], location='json')
+                            choices=['text-generation', 'embeddings', 'speech2text', 'reranking'], location='json')
         parser.add_argument('config', type=dict, required=True, nullable=False, location='json')
         args = parser.parse_args()

@@ -155,7 +155,7 @@ class ModelProviderModelUpdateApi(Resource):
         parser = reqparse.RequestParser()
         parser.add_argument('model_name', type=str, required=True, nullable=False, location='json')
         parser.add_argument('model_type', type=str, required=True, nullable=False,
-                            choices=['text-generation', 'embeddings', 'speech2text'], location='json')
+                            choices=['text-generation', 'embeddings', 'speech2text', 'reranking'], location='json')
         parser.add_argument('config', type=dict, required=True, nullable=False, location='json')
         args = parser.parse_args()

@@ -184,7 +184,7 @@ class ModelProviderModelUpdateApi(Resource):
         parser = reqparse.RequestParser()
         parser.add_argument('model_name', type=str, required=True, nullable=False, location='args')
         parser.add_argument('model_type', type=str, required=True, nullable=False,
-                            choices=['text-generation', 'embeddings', 'speech2text'], location='args')
+                            choices=['text-generation', 'embeddings', 'speech2text', 'reranking'], location='args')
         args = parser.parse_args()

         provider_service = ProviderService()

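The three hunks above extend the same reqparse whitelist, so 'reranking' becomes an accepted model_type for validate, update, and delete requests. A minimal sketch of a matching call (the route, port, and auth header are assumptions for illustration; only the JSON fields mirror the parser arguments above):

```python
import requests

# Hypothetical request against the model validate endpoint; URL and token
# are assumed, the payload follows the reqparse definitions in the diff.
resp = requests.post(
    'http://localhost:5001/console/api/workspaces/current/model-providers/xinference/models/validate',
    headers={'Authorization': 'Bearer <console-session-token>'},
    json={
        'model_name': 'bge-reranker-base',
        'model_type': 'reranking',  # newly accepted choice
        'config': {'server_url': 'http://127.0.0.1:9997', 'model_uid': '<uid>'},
    },
    timeout=10,
)
resp.raise_for_status()
```
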
@@ -1,27 +1,45 @@
 import decimal
 import logging
 from typing import List, Optional, Any

+import openai
 from langchain.callbacks.manager import Callbacks
-from langchain.llms import ChatGLM
-from langchain.schema import LLMResult
+from langchain.schema import LLMResult, get_buffer_string

-from core.model_providers.error import LLMBadRequestError
+from core.model_providers.error import LLMBadRequestError, LLMRateLimitError, LLMAuthorizationError, \
+    LLMAPIUnavailableError, LLMAPIConnectionError
 from core.model_providers.models.llm.base import BaseLLM
 from core.model_providers.models.entity.message import PromptMessage, MessageType
 from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs
+from core.third_party.langchain.llms.chat_open_ai import EnhanceChatOpenAI


 class ChatGLMModel(BaseLLM):
-    model_mode: ModelMode = ModelMode.COMPLETION
+    model_mode: ModelMode = ModelMode.CHAT

     def _init_client(self) -> Any:
         provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs)
-        return ChatGLM(
+        extra_model_kwargs = {
+            'top_p': provider_model_kwargs.get('top_p')
+        }
+
+        if provider_model_kwargs.get('max_length') is not None:
+            extra_model_kwargs['max_length'] = provider_model_kwargs.get('max_length')
+
+        client = EnhanceChatOpenAI(
+            model_name=self.name,
+            temperature=provider_model_kwargs.get('temperature'),
+            max_tokens=provider_model_kwargs.get('max_tokens'),
+            model_kwargs=extra_model_kwargs,
             streaming=self.streaming,
             callbacks=self.callbacks,
-            endpoint_url=self.credentials.get('api_base'),
-            **provider_model_kwargs
+            request_timeout=60,
+            openai_api_key="1",
+            openai_api_base=self.credentials['api_base'] + '/v1'
         )
+
+        return client

     def _run(self, messages: List[PromptMessage],
              stop: Optional[List[str]] = None,
              callbacks: Callbacks = None,
@@ -45,19 +63,40 @@ class ChatGLMModel(BaseLLM):
         :return:
         """
         prompts = self._get_prompt_from_messages(messages)
-        return max(self._client.get_num_tokens(prompts), 0)
+        return max(sum([self._client.get_num_tokens(get_buffer_string([m])) for m in prompts]) - len(prompts), 0)

     def get_currency(self):
         return 'RMB'

     def _set_model_kwargs(self, model_kwargs: ModelKwargs):
         provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs)
-        for k, v in provider_model_kwargs.items():
-            if hasattr(self.client, k):
-                setattr(self.client, k, v)
+        extra_model_kwargs = {
+            'top_p': provider_model_kwargs.get('top_p')
+        }
+
+        self.client.temperature = provider_model_kwargs.get('temperature')
+        self.client.max_tokens = provider_model_kwargs.get('max_tokens')
+        self.client.model_kwargs = extra_model_kwargs

     def handle_exceptions(self, ex: Exception) -> Exception:
-        if isinstance(ex, ValueError):
-            return LLMBadRequestError(f"ChatGLM: {str(ex)}")
+        if isinstance(ex, openai.error.InvalidRequestError):
+            logging.warning("Invalid request to ChatGLM API.")
+            return LLMBadRequestError(str(ex))
+        elif isinstance(ex, openai.error.APIConnectionError):
+            logging.warning("Failed to connect to ChatGLM API.")
+            return LLMAPIConnectionError(ex.__class__.__name__ + ":" + str(ex))
+        elif isinstance(ex, (openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.Timeout)):
+            logging.warning("ChatGLM service unavailable.")
+            return LLMAPIUnavailableError(ex.__class__.__name__ + ":" + str(ex))
+        elif isinstance(ex, openai.error.RateLimitError):
+            return LLMRateLimitError(str(ex))
+        elif isinstance(ex, openai.error.AuthenticationError):
+            return LLMAuthorizationError(str(ex))
+        elif isinstance(ex, openai.error.OpenAIError):
+            return LLMBadRequestError(ex.__class__.__name__ + ":" + str(ex))
+        else:
+            return ex

     @classmethod
     def support_streaming(cls):
         return True

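The rewrite drops langchain's ChatGLM completion wrapper in favor of an OpenAI-compatible chat client: ChatGLM3 ships an OpenAI-style API server, so the model can be driven through the standard chat-completions route with a placeholder key. A minimal sketch of that assumption using the pre-1.0 openai client (the server URL and port are illustrative):

```python
import openai

# The ChatGLM3 OpenAI-compatible server ignores the key, so any placeholder
# works -- the same trick as openai_api_key="1" in the diff above.
openai.api_key = '1'
openai.api_base = 'http://127.0.0.1:8000/v1'  # assumed local ChatGLM3 endpoint

completion = openai.ChatCompletion.create(
    model='chatglm3-6b',
    messages=[{'role': 'user', 'content': 'ping'}],
    temperature=1.0,
    max_tokens=32,
)
print(completion['choices'][0]['message']['content'])
```
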
@@ -0,0 +1,58 @@
+import logging
+from typing import Optional, List
+
+from langchain.schema import Document
+from xinference_client.client.restful.restful_client import Client
+
+from core.model_providers.error import LLMBadRequestError
+from core.model_providers.models.reranking.base import BaseReranking
+from core.model_providers.providers.base import BaseModelProvider
+
+
+class XinferenceReranking(BaseReranking):
+
+    def __init__(self, model_provider: BaseModelProvider, name: str):
+        self.credentials = model_provider.get_model_credentials(
+            model_name=name,
+            model_type=self.type
+        )
+
+        client = Client(self.credentials['server_url'])
+
+        super().__init__(model_provider, client, name)
+
+    def rerank(self, query: str, documents: List[Document], score_threshold: Optional[float], top_k: Optional[int]) -> Optional[List[Document]]:
+        # deduplicate the input documents by doc_id before reranking
+        docs = []
+        doc_id = []
+        for document in documents:
+            if document.metadata['doc_id'] not in doc_id:
+                doc_id.append(document.metadata['doc_id'])
+                docs.append(document.page_content)
+
+        model = self.client.get_model(self.credentials['model_uid'])
+        response = model.rerank(query=query, documents=docs, top_n=top_k)
+        rerank_documents = []
+
+        for idx, result in enumerate(response['results']):
+            # format document
+            index = result['index']
+            rerank_document = Document(
+                page_content=result['document'],
+                metadata={
+                    "doc_id": documents[index].metadata['doc_id'],
+                    "doc_hash": documents[index].metadata['doc_hash'],
+                    "document_id": documents[index].metadata['document_id'],
+                    "dataset_id": documents[index].metadata['dataset_id'],
+                    'score': result['relevance_score']
+                }
+            )
+            # score threshold check
+            if score_threshold is not None:
+                if result['relevance_score'] >= score_threshold:
+                    rerank_documents.append(rerank_document)
+            else:
+                rerank_documents.append(rerank_document)
+        return rerank_documents
+
+    def handle_exceptions(self, ex: Exception) -> Exception:
+        return LLMBadRequestError(f"Xinference rerank: {str(ex)}")

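The rerank method above indexes each result as a dict (note the results are dicts throughout, so attribute access like `result.relevance_score` would raise; the subscript form is used consistently). For reference, the response shape it assumes, reconstructed from those accesses rather than taken from the diff itself:

```python
# Assumed shape of model.rerank(...) in xinference-client: 'index' points back
# into the deduplicated docs list, scores are sorted descending.
response = {
    'results': [
        {'index': 1, 'relevance_score': 0.98, 'document': 'hello'},
        {'index': 0, 'relevance_score': 0.12, 'document': 'bye'},
    ]
}

for result in response['results']:
    print(result['index'], result['relevance_score'], result['document'])
```
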
@@ -2,6 +2,7 @@ import json
 from json import JSONDecodeError
 from typing import Type

+import requests
 from langchain.llms import ChatGLM

 from core.helper import encrypter
@@ -25,21 +26,26 @@ class ChatGLMProvider(BaseModelProvider):
         if model_type == ModelType.TEXT_GENERATION:
             return [
                 {
-                    'id': 'chatglm2-6b',
-                    'name': 'ChatGLM2-6B',
-                    'mode': ModelMode.COMPLETION.value,
+                    'id': 'chatglm3-6b',
+                    'name': 'ChatGLM3-6B',
+                    'mode': ModelMode.CHAT.value,
                 },
                 {
-                    'id': 'chatglm-6b',
-                    'name': 'ChatGLM-6B',
-                    'mode': ModelMode.COMPLETION.value,
+                    'id': 'chatglm3-6b-32k',
+                    'name': 'ChatGLM3-6B-32K',
+                    'mode': ModelMode.CHAT.value,
                 },
+                {
+                    'id': 'chatglm2-6b',
+                    'name': 'ChatGLM2-6B',
+                    'mode': ModelMode.CHAT.value,
+                }
             ]
         else:
             return []

     def _get_text_generation_model_mode(self, model_name) -> str:
-        return ModelMode.COMPLETION.value
+        return ModelMode.CHAT.value

     def get_model_class(self, model_type: ModelType) -> Type[BaseProviderModel]:
         """
@@ -64,16 +70,19 @@ class ChatGLMProvider(BaseModelProvider):
         :return:
         """
         model_max_tokens = {
-            'chatglm-6b': 2000,
-            'chatglm2-6b': 32000,
+            'chatglm3-6b-32k': 32000,
+            'chatglm3-6b': 8000,
+            'chatglm2-6b': 8000,
         }

+        max_tokens_alias = 'max_length' if model_name == 'chatglm2-6b' else 'max_tokens'
+
         return ModelKwargsRules(
             temperature=KwargRule[float](min=0, max=2, default=1, precision=2),
             top_p=KwargRule[float](min=0, max=1, default=0.7, precision=2),
             presence_penalty=KwargRule[float](enabled=False),
             frequency_penalty=KwargRule[float](enabled=False),
-            max_tokens=KwargRule[int](alias='max_token', min=10, max=model_max_tokens.get(model_name), default=2048, precision=0),
+            max_tokens=KwargRule[int](alias=max_tokens_alias, min=10, max=model_max_tokens.get(model_name), default=2048, precision=0),
         )

     @classmethod
@@ -85,16 +94,10 @@ class ChatGLMProvider(BaseModelProvider):
             raise CredentialsValidateFailedError('ChatGLM Endpoint URL must be provided.')

         try:
-            credential_kwargs = {
-                'endpoint_url': credentials['api_base']
-            }
+            response = requests.get(f"{credentials['api_base']}/v1/models", timeout=5)

-            llm = ChatGLM(
-                max_token=10,
-                **credential_kwargs
-            )
-
-            llm("ping")
+            if response.status_code != 200:
+                raise Exception('ChatGLM Endpoint URL is invalid.')
         except Exception as ex:
             raise CredentialsValidateFailedError(str(ex))

@@ -2,11 +2,13 @@ import json
 from typing import Type

 import requests
+from xinference_client.client.restful.restful_client import Client

 from core.helper import encrypter
 from core.model_providers.models.embedding.xinference_embedding import XinferenceEmbedding
 from core.model_providers.models.entity.model_params import KwargRule, ModelKwargsRules, ModelType, ModelMode
 from core.model_providers.models.llm.xinference_model import XinferenceModel
+from core.model_providers.models.reranking.xinference_reranking import XinferenceReranking
 from core.model_providers.providers.base import BaseModelProvider, CredentialsValidateFailedError

 from core.model_providers.models.base import BaseProviderModel
@@ -40,6 +42,8 @@ class XinferenceProvider(BaseModelProvider):
             model_class = XinferenceModel
         elif model_type == ModelType.EMBEDDINGS:
             model_class = XinferenceEmbedding
+        elif model_type == ModelType.RERANKING:
+            model_class = XinferenceReranking
         else:
             raise NotImplementedError

@@ -113,6 +117,10 @@ class XinferenceProvider(BaseModelProvider):
                 )

                 embedding.embed_query("ping")
+            elif model_type == ModelType.RERANKING:
+                rerank_client = Client(credential_kwargs['server_url'])
+                model = rerank_client.get_model(credential_kwargs['model_uid'])
+                model.rerank(query="ping", documents=["ping", "pong"], top_n=2)
         except Exception as ex:
             raise CredentialsValidateFailedError(str(ex))

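The reranking credential check is a live round trip to the Xinference server. Pulled out of the provider, the same check can be run standalone; a sketch assuming a reachable server with a rerank model already launched:

```python
import os

from xinference_client.client.restful.restful_client import Client

# Mirrors the RERANKING branch of the validation above; any failure here is
# what the provider converts into CredentialsValidateFailedError.
client = Client(os.environ.get('XINFERENCE_SERVER_URL', 'http://127.0.0.1:9997'))
model = client.get_model(os.environ.get('XINFERENCE_MODEL_UID', '<uid>'))
print(model.rerank(query="ping", documents=["ping", "pong"], top_n=2))
```
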
@@ -6,6 +6,7 @@
     "model_flexibility": "configurable",
     "supported_model_types": [
         "text-generation",
-        "embeddings"
+        "embeddings",
+        "reranking"
     ]
 }

@@ -213,16 +213,16 @@ class OrchestratorRuleParser:
                 continue
             dataset_ids.append(dataset.id)
             if retrieval_model == 'single':
-                retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
-                top_k = retrieval_model['top_k']
+                retrieval_model_config = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
+                top_k = retrieval_model_config['top_k']

                 # dynamically adjust top_k when the remaining token number is not enough to support top_k
                 # top_k = self._dynamic_calc_retrieve_k(dataset=dataset, top_k=top_k, rest_tokens=rest_tokens)

                 score_threshold = None
-                score_threshold_enable = retrieval_model.get("score_threshold_enable")
+                score_threshold_enable = retrieval_model_config.get("score_threshold_enable")
                 if score_threshold_enable:
-                    score_threshold = retrieval_model.get("score_threshold")
+                    score_threshold = retrieval_model_config.get("score_threshold")

                 tool = DatasetRetrieverTool.from_dataset(
                     dataset=dataset,

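The rename is more than cosmetic: `retrieval_model` enters the loop holding the mode string 'single', and reassigning it to the per-dataset config dict would make the `== 'single'` check fail for every dataset after the first. A stripped-down illustration of the shadowing bug:

```python
# Before the fix (simplified): the mode string is clobbered by the config
# dict on the first iteration, so later datasets never enter the branch.
retrieval_model = 'single'
dataset_configs = [{'top_k': 4}, {'top_k': 2}]

for config in dataset_configs:
    if retrieval_model == 'single':
        retrieval_model = config          # old code: shadows the mode string
        print(retrieval_model['top_k'])   # runs only for the first dataset

# The fix stores the dict under a new name, retrieval_model_config,
# leaving the mode string intact across iterations.
```
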
@@ -48,7 +48,7 @@ huggingface_hub~=0.16.4
 transformers~=4.31.0
 stripe~=5.5.0
 pandas==1.5.3
-xinference-client~=0.5.4
+xinference-client~=0.6.4
 safetensors==0.3.2
 zhipuai==1.0.7
 werkzeug==2.3.7

@@ -50,4 +50,7 @@ XINFERENCE_MODEL_UID=
 OPENLLM_SERVER_URL=

 # LocalAI Credentials
-LOCALAI_SERVER_URL=
+LOCALAI_SERVER_URL=
+
+# Cohere Credentials
+COHERE_API_KEY=

@@ -0,0 +1,61 @@
+import json
+import os
+from unittest.mock import patch
+
+from langchain.schema import Document
+
+from core.model_providers.models.reranking.cohere_reranking import CohereReranking
+from core.model_providers.providers.cohere_provider import CohereProvider
+from models.provider import Provider, ProviderType
+
+
+def get_mock_provider(valid_api_key):
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='cohere',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps({'api_key': valid_api_key}),
+        is_valid=True,
+    )
+
+
+def get_mock_model():
+    valid_api_key = os.environ['COHERE_API_KEY']
+    provider = CohereProvider(provider=get_mock_provider(valid_api_key))
+    return CohereReranking(
+        model_provider=provider,
+        name='rerank-english-v2.0'
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_api_key):
+    return encrypted_api_key
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_run(mock_decrypt):
+    model = get_mock_model()
+
+    docs = []
+    docs.append(Document(
+        page_content='bye',
+        metadata={
+            "doc_id": 'a',
+            "doc_hash": 'doc_hash',
+            "document_id": 'document_id',
+            "dataset_id": 'dataset_id',
+        }
+    ))
+    docs.append(Document(
+        page_content='hello',
+        metadata={
+            "doc_id": 'b',
+            "doc_hash": 'doc_hash',
+            "document_id": 'document_id',
+            "dataset_id": 'dataset_id',
+        }
+    ))
+    rst = model.rerank('hello', docs, None, 2)
+
+    assert rst[0].page_content == 'hello'

@@ -0,0 +1,78 @@
+import json
+import os
+from unittest.mock import patch, MagicMock
+
+from langchain.schema import Document
+
+from core.model_providers.models.entity.model_params import ModelType
+from core.model_providers.models.reranking.xinference_reranking import XinferenceReranking
+from core.model_providers.providers.xinference_provider import XinferenceProvider
+from models.provider import Provider, ProviderType, ProviderModel
+
+
+def get_mock_provider(valid_server_url, valid_model_uid):
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='xinference',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps({'server_url': valid_server_url, 'model_uid': valid_model_uid}),
+        is_valid=True,
+    )
+
+
+def get_mock_model(mocker):
+    valid_server_url = os.environ['XINFERENCE_SERVER_URL']
+    valid_model_uid = os.environ['XINFERENCE_MODEL_UID']
+    model_name = 'bge-reranker-base'
+    provider = XinferenceProvider(provider=get_mock_provider(valid_server_url, valid_model_uid))
+
+    mock_query = MagicMock()
+    mock_query.filter.return_value.first.return_value = ProviderModel(
+        provider_name='xinference',
+        model_name=model_name,
+        model_type=ModelType.RERANKING.value,
+        encrypted_config=json.dumps({
+            'server_url': valid_server_url,
+            'model_uid': valid_model_uid
+        }),
+        is_valid=True,
+    )
+    mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
+
+    return XinferenceReranking(
+        model_provider=provider,
+        name=model_name
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_api_key):
+    return encrypted_api_key
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_run(mock_decrypt, mocker):
+    model = get_mock_model(mocker)
+
+    docs = []
+    docs.append(Document(
+        page_content='bye',
+        metadata={
+            "doc_id": 'a',
+            "doc_hash": 'doc_hash',
+            "document_id": 'document_id',
+            "dataset_id": 'dataset_id',
+        }
+    ))
+    docs.append(Document(
+        page_content='hello',
+        metadata={
+            "doc_id": 'b',
+            "doc_hash": 'doc_hash',
+            "document_id": 'document_id',
+            "dataset_id": 'dataset_id',
+        }
+    ))
+    rst = model.rerank('hello', docs, None, 2)
+
+    assert rst[0].page_content == 'hello'

@@ -2,7 +2,9 @@ import pytest
 from unittest.mock import patch
 import json

+import requests
 from langchain.schema import LLMResult, Generation, AIMessage, ChatResult, ChatGeneration
+from requests import Response

 from core.model_providers.providers.base import CredentialsValidateFailedError
 from core.model_providers.providers.chatglm_provider import ChatGLMProvider
@@ -26,8 +28,11 @@ def decrypt_side_effect(tenant_id, encrypted_key):


 def test_is_provider_credentials_valid_or_raise_valid(mocker):
-    mocker.patch('langchain.llms.chatglm.ChatGLM._call',
-                 return_value="abc")
+    mock_response = Response()
+    mock_response.status_code = 200
+    mock_response._content = json.dumps({'models': []}).encode('utf-8')
+    mocker.patch('requests.get',
+                 return_value=mock_response)

     MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise(VALIDATE_CREDENTIAL)

@@ -2,7 +2,7 @@ version: '3.1'
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.3.31-fix3
+    image: langgenius/dify-api:0.3.32
     restart: always
     environment:
       # Startup mode, 'api' starts the API server.
@@ -128,7 +128,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.3.31-fix3
+    image: langgenius/dify-api:0.3.32
     restart: always
     environment:
       # Startup mode, 'worker' starts the Celery worker for processing the queue.
@@ -196,7 +196,7 @@ services:

   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.3.31-fix3
+    image: langgenius/dify-web:0.3.32
     restart: always
     environment:
       EDITION: SELF_HOSTED

@@ -14,10 +14,10 @@ export type IMoreInfoProps = {
 const MoreInfo: FC<IMoreInfoProps> = ({ more, isQuestion, className }) => {
   const { t } = useTranslation()
   return (<div className={`mt-1 w-full text-xs text-gray-400 ${isQuestion ? 'mr-2 text-right ' : 'pl-2 text-left float-right'} ${className}`}>
-    <span>{`${t('appLog.detail.timeConsuming')} ${more.latency}${t('appLog.detail.second')}`}</span>
-    <span>{`${t('appLog.detail.tokenCost')} ${formatNumber(more.tokens)}`}</span>
-    <span>· </span>
-    <span>{more.time} </span>
+    <span className='mr-2'>{`${t('appLog.detail.timeConsuming')} ${more.latency}${t('appLog.detail.second')}`}</span>
+    <span className='mr-2'>{`${t('appLog.detail.tokenCost')} ${formatNumber(more.tokens)}`}</span>
+    <span className='mr-2'>·</span>
+    <span>{more.time}</span>
   </div>)
 }
 export default React.memo(MoreInfo)

@@ -80,6 +80,13 @@ const config: ProviderConfig = {
         'zh-Hans': 'Embeddings',
       },
     },
+    {
+      key: 'reranking',
+      label: {
+        'en': 'Rerank',
+        'zh-Hans': 'Rerank',
+      },
+    },
   ],
 },
 {

@@ -150,10 +150,11 @@ const Form: FC<FormProps> = ({

   if (field.type === 'radio') {
+    const options = typeof field.options === 'function' ? field.options(value) : field.options
+
     return (
       <div key={field.key} className='py-3'>
         <div className={nameClassName}>{field.label[locale]}</div>
-        <div className='grid grid-cols-2 gap-3'>
+        <div className={`grid grid-cols-${options?.length} gap-3`}>
          {
            options?.map(option => (
              <div

@@ -1,6 +1,6 @@
 {
   "name": "dify-web",
-  "version": "0.3.31",
+  "version": "0.3.32",
   "private": true,
   "scripts": {
     "dev": "next dev",