Mirror of https://github.com/langgenius/dify.git, synced 2026-01-22 07:04:09 +00:00.
Compare commits

20 commits:

- 3cc697832a
- bb98f5756a
- e1d2203371
- 93467cb363
- ea526d0822
- 0e627c920f
- ea35f1dce1
- a5b80c9d1f
- f704094a5f
- 1f58f15bff
- b930716745
- 9587479b76
- 3c0fbf3a6a
- caa330c91f
- 4a55d5729d
- d6a6697891
- 778cfb37a2
- ce85ee3aa6
- b23de4affc
- d8a7e894aa
.github/workflows/build-api-image.yml (vendored, 4 changes)

```diff
@@ -34,9 +34,7 @@ jobs:
           type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }}
           type=ref,event=branch
           type=sha,enable=true,priority=100,prefix=,suffix=,format=long
-          type=semver,pattern={{major}}.{{minor}}.{{patch}}
-          type=semver,pattern={{major}}.{{minor}}
-          type=semver,pattern={{major}}
+          type=raw,value=${{ github.ref_name }},enable=${{ startsWith(github.ref, 'refs/tags/') }}

      - name: Build and push
        uses: docker/build-push-action@v4
```
.github/workflows/build-web-image.yml (vendored, 4 changes)

```diff
@@ -34,9 +34,7 @@ jobs:
           type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }}
           type=ref,event=branch
           type=sha,enable=true,priority=100,prefix=,suffix=,format=long
-          type=semver,pattern={{major}}.{{minor}}.{{patch}}
-          type=semver,pattern={{major}}.{{minor}}
-          type=semver,pattern={{major}}
+          type=raw,value=${{ github.ref_name }},enable=${{ startsWith(github.ref, 'refs/tags/') }}

      - name: Build and push
        uses: docker/build-push-action@v4
```
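Both workflow hunks make the same change to the docker/metadata-action tag list: the three `type=semver` patterns are dropped in favor of a single `type=raw,value=${{ github.ref_name }}` entry, so a pushed tag such as `0.3.32` is published verbatim as the image tag (alongside `latest` and the long commit SHA) instead of being expanded into `0.3.32`, `0.3`, and `0`.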
```diff
@@ -91,7 +91,7 @@ class Config:
         # ------------------------
         # General Configurations.
         # ------------------------
-        self.CURRENT_VERSION = "0.3.31"
+        self.CURRENT_VERSION = "0.3.32"
         self.COMMIT_SHA = get_env('COMMIT_SHA')
         self.EDITION = "SELF_HOSTED"
         self.DEPLOY_ENV = get_env('DEPLOY_ENV')
```
```diff
@@ -62,16 +62,15 @@ class DailyConversationStatistic(Resource):
         sql_query += ' GROUP BY date order by date'

-        with db.engine.begin() as conn:
-            rs = conn.execute(db.text(sql_query), arg_dict)
-
         response_data = []

-        for i in rs:
-            response_data.append({
-                'date': str(i.date),
-                'conversation_count': i.conversation_count
-            })
+        with db.engine.begin() as conn:
+            rs = conn.execute(db.text(sql_query), arg_dict)
+            for i in rs:
+                response_data.append({
+                    'date': str(i.date),
+                    'conversation_count': i.conversation_count
+                })

         return jsonify({
             'data': response_data
@@ -124,16 +123,15 @@ class DailyTerminalsStatistic(Resource):
         sql_query += ' GROUP BY date order by date'

-        with db.engine.begin() as conn:
-            rs = conn.execute(db.text(sql_query), arg_dict)
-
         response_data = []

-        for i in rs:
-            response_data.append({
-                'date': str(i.date),
-                'terminal_count': i.terminal_count
-            })
+        with db.engine.begin() as conn:
+            rs = conn.execute(db.text(sql_query), arg_dict)
+            for i in rs:
+                response_data.append({
+                    'date': str(i.date),
+                    'terminal_count': i.terminal_count
+                })

         return jsonify({
             'data': response_data
@@ -187,18 +185,17 @@ class DailyTokenCostStatistic(Resource):
         sql_query += ' GROUP BY date order by date'

-        with db.engine.begin() as conn:
-            rs = conn.execute(db.text(sql_query), arg_dict)
-
         response_data = []

-        for i in rs:
-            response_data.append({
-                'date': str(i.date),
-                'token_count': i.token_count,
-                'total_price': i.total_price,
-                'currency': 'USD'
-            })
+        with db.engine.begin() as conn:
+            rs = conn.execute(db.text(sql_query), arg_dict)
+            for i in rs:
+                response_data.append({
+                    'date': str(i.date),
+                    'token_count': i.token_count,
+                    'total_price': i.total_price,
+                    'currency': 'USD'
+                })

         return jsonify({
             'data': response_data
@@ -256,16 +253,15 @@ LEFT JOIN conversations c on c.id=subquery.conversation_id
 GROUP BY date
 ORDER BY date"""

+        response_data = []
+
         with db.engine.begin() as conn:
             rs = conn.execute(db.text(sql_query), arg_dict)

-        response_data = []
-
-        for i in rs:
-            response_data.append({
-                'date': str(i.date),
-                'interactions': float(i.interactions.quantize(Decimal('0.01')))
-            })
+            for i in rs:
+                response_data.append({
+                    'date': str(i.date),
+                    'interactions': float(i.interactions.quantize(Decimal('0.01')))
+                })

         return jsonify({
             'data': response_data
@@ -320,20 +316,19 @@ class UserSatisfactionRateStatistic(Resource):
         sql_query += ' GROUP BY date order by date'

-        with db.engine.begin() as conn:
-            rs = conn.execute(db.text(sql_query), arg_dict)
-
         response_data = []

-        for i in rs:
-            response_data.append({
-                'date': str(i.date),
-                'rate': round((i.feedback_count * 1000 / i.message_count) if i.message_count > 0 else 0, 2),
-            })
+        with db.engine.begin() as conn:
+            rs = conn.execute(db.text(sql_query), arg_dict)
+            for i in rs:
+                response_data.append({
+                    'date': str(i.date),
+                    'rate': round((i.feedback_count * 1000 / i.message_count) if i.message_count > 0 else 0, 2),
+                })

         return jsonify({
             'data': response_data
         })


 class AverageResponseTimeStatistic(Resource):
@@ -383,16 +378,15 @@ class AverageResponseTimeStatistic(Resource):
         sql_query += ' GROUP BY date order by date'

-        with db.engine.begin() as conn:
-            rs = conn.execute(db.text(sql_query), arg_dict)
-
         response_data = []

-        for i in rs:
-            response_data.append({
-                'date': str(i.date),
-                'latency': round(i.latency * 1000, 4)
-            })
+        with db.engine.begin() as conn:
+            rs = conn.execute(db.text(sql_query), arg_dict)
+            for i in rs:
+                response_data.append({
+                    'date': str(i.date),
+                    'latency': round(i.latency * 1000, 4)
+                })

         return jsonify({
             'data': response_data
@@ -447,16 +441,15 @@ WHERE app_id = :app_id'''
         sql_query += ' GROUP BY date order by date'

-        with db.engine.begin() as conn:
-            rs = conn.execute(db.text(sql_query), arg_dict)
-
         response_data = []

-        for i in rs:
-            response_data.append({
-                'date': str(i.date),
-                'tps': round(i.tokens_per_second, 4)
-            })
+        with db.engine.begin() as conn:
+            rs = conn.execute(db.text(sql_query), arg_dict)
+            for i in rs:
+                response_data.append({
+                    'date': str(i.date),
+                    'tps': round(i.tokens_per_second, 4)
+                })

         return jsonify({
             'data': response_data
```
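Every statistics endpoint gets the same fix: the `for i in rs` loop moves inside the `with db.engine.begin()` block. A minimal sketch of why, assuming SQLAlchemy 1.4+ semantics where a result is bound to the connection that produced it (the query here is illustrative, not from the commit):

```python
from sqlalchemy import create_engine, text

engine = create_engine("sqlite://")  # illustrative in-memory database

with engine.begin() as conn:
    rs = conn.execute(text("SELECT 1 AS conversation_count"))
# the connection is returned to the pool here; iterating `rs` after the block
# can raise ResourceClosedError ("This result object is closed")

with engine.begin() as conn:
    rs = conn.execute(text("SELECT 1 AS conversation_count"))
    rows = [dict(row._mapping) for row in rs]  # consume rows while conn is open
print(rows)  # [{'conversation_count': 1}]
```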
```diff
@@ -115,7 +115,7 @@ class ModelProviderModelValidateApi(Resource):
         parser = reqparse.RequestParser()
         parser.add_argument('model_name', type=str, required=True, nullable=False, location='json')
         parser.add_argument('model_type', type=str, required=True, nullable=False,
-                            choices=['text-generation', 'embeddings', 'speech2text'], location='json')
+                            choices=['text-generation', 'embeddings', 'speech2text', 'reranking'], location='json')
         parser.add_argument('config', type=dict, required=True, nullable=False, location='json')
         args = parser.parse_args()

@@ -155,7 +155,7 @@ class ModelProviderModelUpdateApi(Resource):
         parser = reqparse.RequestParser()
         parser.add_argument('model_name', type=str, required=True, nullable=False, location='json')
         parser.add_argument('model_type', type=str, required=True, nullable=False,
-                            choices=['text-generation', 'embeddings', 'speech2text'], location='json')
+                            choices=['text-generation', 'embeddings', 'speech2text', 'reranking'], location='json')
         parser.add_argument('config', type=dict, required=True, nullable=False, location='json')
         args = parser.parse_args()

@@ -184,7 +184,7 @@ class ModelProviderModelUpdateApi(Resource):
         parser = reqparse.RequestParser()
         parser.add_argument('model_name', type=str, required=True, nullable=False, location='args')
         parser.add_argument('model_type', type=str, required=True, nullable=False,
-                            choices=['text-generation', 'embeddings', 'speech2text'], location='args')
+                            choices=['text-generation', 'embeddings', 'speech2text', 'reranking'], location='args')
         args = parser.parse_args()

         provider_service = ProviderService()
```
```diff
@@ -111,7 +111,7 @@ class WeaviateVectorIndex(BaseVectorIndex):
         if self._vector_store:
             return self._vector_store

-        attributes = ['doc_id', 'dataset_id', 'document_id']
+        attributes = ['doc_id', 'dataset_id', 'document_id', 'doc_hash']
         if self._is_origin():
             attributes = ['doc_id']
```
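Adding `doc_hash` to the retrieved attributes matters for the new reranking path introduced later in this comparison: the rerankers copy `doc_hash` out of each document's metadata, so the Weaviate index has to return it.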
```diff
@@ -1,27 +1,45 @@
 import decimal
+import logging
 from typing import List, Optional, Any

+import openai
 from langchain.callbacks.manager import Callbacks
-from langchain.llms import ChatGLM
-from langchain.schema import LLMResult
+from langchain.schema import LLMResult, get_buffer_string

-from core.model_providers.error import LLMBadRequestError
+from core.model_providers.error import LLMBadRequestError, LLMRateLimitError, LLMAuthorizationError, \
+    LLMAPIUnavailableError, LLMAPIConnectionError
 from core.model_providers.models.llm.base import BaseLLM
 from core.model_providers.models.entity.message import PromptMessage, MessageType
 from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs
+from core.third_party.langchain.llms.chat_open_ai import EnhanceChatOpenAI


 class ChatGLMModel(BaseLLM):
-    model_mode: ModelMode = ModelMode.COMPLETION
+    model_mode: ModelMode = ModelMode.CHAT

     def _init_client(self) -> Any:
         provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs)
-        return ChatGLM(
+
+        extra_model_kwargs = {
+            'top_p': provider_model_kwargs.get('top_p')
+        }
+
+        if provider_model_kwargs.get('max_length') is not None:
+            extra_model_kwargs['max_length'] = provider_model_kwargs.get('max_length')
+
+        client = EnhanceChatOpenAI(
+            model_name=self.name,
+            temperature=provider_model_kwargs.get('temperature'),
+            max_tokens=provider_model_kwargs.get('max_tokens'),
+            model_kwargs=extra_model_kwargs,
             streaming=self.streaming,
             callbacks=self.callbacks,
-            endpoint_url=self.credentials.get('api_base'),
-            **provider_model_kwargs
+            request_timeout=60,
+            openai_api_key="1",
+            openai_api_base=self.credentials['api_base'] + '/v1'
         )

+        return client
+
     def _run(self, messages: List[PromptMessage],
              stop: Optional[List[str]] = None,
              callbacks: Callbacks = None,
@@ -45,19 +63,40 @@ class ChatGLMModel(BaseLLM):
         :return:
         """
         prompts = self._get_prompt_from_messages(messages)
-        return max(self._client.get_num_tokens(prompts), 0)
+        return max(sum([self._client.get_num_tokens(get_buffer_string([m])) for m in prompts]) - len(prompts), 0)

     def get_currency(self):
         return 'RMB'

     def _set_model_kwargs(self, model_kwargs: ModelKwargs):
         provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs)
-        for k, v in provider_model_kwargs.items():
-            if hasattr(self.client, k):
-                setattr(self.client, k, v)
+        extra_model_kwargs = {
+            'top_p': provider_model_kwargs.get('top_p')
+        }
+
+        self.client.temperature = provider_model_kwargs.get('temperature')
+        self.client.max_tokens = provider_model_kwargs.get('max_tokens')
+        self.client.model_kwargs = extra_model_kwargs

     def handle_exceptions(self, ex: Exception) -> Exception:
-        if isinstance(ex, ValueError):
-            return LLMBadRequestError(f"ChatGLM: {str(ex)}")
+        if isinstance(ex, openai.error.InvalidRequestError):
+            logging.warning("Invalid request to ChatGLM API.")
+            return LLMBadRequestError(str(ex))
+        elif isinstance(ex, openai.error.APIConnectionError):
+            logging.warning("Failed to connect to ChatGLM API.")
+            return LLMAPIConnectionError(ex.__class__.__name__ + ":" + str(ex))
+        elif isinstance(ex, (openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.Timeout)):
+            logging.warning("ChatGLM service unavailable.")
+            return LLMAPIUnavailableError(ex.__class__.__name__ + ":" + str(ex))
+        elif isinstance(ex, openai.error.RateLimitError):
+            return LLMRateLimitError(str(ex))
+        elif isinstance(ex, openai.error.AuthenticationError):
+            return LLMAuthorizationError(str(ex))
+        elif isinstance(ex, openai.error.OpenAIError):
+            return LLMBadRequestError(ex.__class__.__name__ + ":" + str(ex))
         else:
             return ex

     @classmethod
     def support_streaming(cls):
         return True
```
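The rewrite drops langchain's `ChatGLM` wrapper and instead speaks to ChatGLM3's OpenAI-compatible API server through `EnhanceChatOpenAI`, appending `/v1` to the configured endpoint and passing a dummy API key. A rough sketch of the equivalent raw call, assuming the pre-1.0 `openai` package this repo pins and a local api_server (URL and model name are placeholders):

```python
import openai

openai.api_key = "1"  # placeholder key, mirroring openai_api_key="1" above
openai.api_base = "http://localhost:8000/v1"  # placeholder: credentials['api_base'] + '/v1'

resp = openai.ChatCompletion.create(
    model="chatglm3-6b",
    messages=[{"role": "user", "content": "ping"}],
    temperature=0.9,
    max_tokens=16,
)
print(resp["choices"][0]["message"]["content"])
```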
```diff
@@ -0,0 +1,58 @@
+import logging
+from typing import Optional, List
+
+from langchain.schema import Document
+from xinference_client.client.restful.restful_client import Client
+
+from core.model_providers.error import LLMBadRequestError
+from core.model_providers.models.reranking.base import BaseReranking
+from core.model_providers.providers.base import BaseModelProvider
+
+
+class XinferenceReranking(BaseReranking):
+
+    def __init__(self, model_provider: BaseModelProvider, name: str):
+        self.credentials = model_provider.get_model_credentials(
+            model_name=name,
+            model_type=self.type
+        )
+
+        client = Client(self.credentials['server_url'])
+
+        super().__init__(model_provider, client, name)
+
+    def rerank(self, query: str, documents: List[Document], score_threshold: Optional[float], top_k: Optional[int]) -> Optional[List[Document]]:
+        docs = []
+        doc_id = []
+        for document in documents:
+            if document.metadata['doc_id'] not in doc_id:
+                doc_id.append(document.metadata['doc_id'])
+                docs.append(document.page_content)
+
+        model = self.client.get_model(self.credentials['model_uid'])
+        response = model.rerank(query=query, documents=docs, top_n=top_k)
+        rerank_documents = []
+
+        for idx, result in enumerate(response['results']):
+            # format document
+            index = result['index']
+            rerank_document = Document(
+                page_content=result['document'],
+                metadata={
+                    "doc_id": documents[index].metadata['doc_id'],
+                    "doc_hash": documents[index].metadata['doc_hash'],
+                    "document_id": documents[index].metadata['document_id'],
+                    "dataset_id": documents[index].metadata['dataset_id'],
+                    'score': result['relevance_score']
+                }
+            )
+            # score threshold check
+            if score_threshold is not None:
+                if result['relevance_score'] >= score_threshold:
+                    rerank_documents.append(rerank_document)
+            else:
+                rerank_documents.append(rerank_document)
+        return rerank_documents
+
+    def handle_exceptions(self, ex: Exception) -> Exception:
+        return LLMBadRequestError(f"Xinference rerank: {str(ex)}")
```
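A hedged usage sketch for the new reranking path; the URL and model UID are placeholders, and the server is assumed to host a rerank-capable model such as bge-reranker-base:

```python
from xinference_client.client.restful.restful_client import Client

client = Client("http://127.0.0.1:9997")         # assumed Xinference server_url
model = client.get_model("my-bge-reranker-uid")  # assumed model_uid credential

response = model.rerank(query="hello", documents=["bye", "hello"], top_n=2)
for result in response["results"]:
    # each result carries the original index, a relevance score, and the text
    print(result["index"], result["relevance_score"], result["document"])
```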
```diff
@@ -32,9 +32,12 @@ class AnthropicProvider(BaseModelProvider):
         if model_type == ModelType.TEXT_GENERATION:
             return [
                 {
-                    'id': 'claude-instant-1',
-                    'name': 'claude-instant-1',
+                    'id': 'claude-2.1',
+                    'name': 'claude-2.1',
                     'mode': ModelMode.CHAT.value,
                     'features': [
                         ModelFeature.AGENT_THOUGHT.value
                     ]
                 },
                 {
                     'id': 'claude-2',
@@ -44,6 +47,11 @@ class AnthropicProvider(BaseModelProvider):
                         ModelFeature.AGENT_THOUGHT.value
                     ]
                 },
+                {
+                    'id': 'claude-instant-1',
+                    'name': 'claude-instant-1',
+                    'mode': ModelMode.CHAT.value,
+                },
             ]
         else:
             return []
@@ -73,12 +81,18 @@ class AnthropicProvider(BaseModelProvider):
         :param model_type:
         :return:
         """
+        model_max_tokens = {
+            'claude-instant-1': 100000,
+            'claude-2': 100000,
+            'claude-2.1': 200000,
+        }
+
         return ModelKwargsRules(
             temperature=KwargRule[float](min=0, max=1, default=1, precision=2),
             top_p=KwargRule[float](min=0, max=1, default=0.7, precision=2),
             presence_penalty=KwargRule[float](enabled=False),
             frequency_penalty=KwargRule[float](enabled=False),
-            max_tokens=KwargRule[int](alias="max_tokens_to_sample", min=10, max=100000, default=256, precision=0),
+            max_tokens=KwargRule[int](alias="max_tokens_to_sample", min=10, max=model_max_tokens.get(model_name, 100000), default=256, precision=0),
         )

     @classmethod
```
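claude-2.1 joins the lineup at the head of the list with agent-thought support and a 200,000-token window in the new `model_max_tokens` map, while claude-instant-1 moves to the end; the pricing file further down in this comparison drops claude-2 from 11.02/32.68 to 8.00/24.00 USD per million tokens and prices claude-2.1 the same.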
```diff
@@ -2,6 +2,7 @@ import json
 from json import JSONDecodeError
 from typing import Type

+import requests
 from langchain.llms import ChatGLM

 from core.helper import encrypter
@@ -25,21 +26,26 @@ class ChatGLMProvider(BaseModelProvider):
         if model_type == ModelType.TEXT_GENERATION:
             return [
                 {
-                    'id': 'chatglm2-6b',
-                    'name': 'ChatGLM2-6B',
-                    'mode': ModelMode.COMPLETION.value,
+                    'id': 'chatglm3-6b',
+                    'name': 'ChatGLM3-6B',
+                    'mode': ModelMode.CHAT.value,
                 },
                 {
-                    'id': 'chatglm-6b',
-                    'name': 'ChatGLM-6B',
-                    'mode': ModelMode.COMPLETION.value,
+                    'id': 'chatglm3-6b-32k',
+                    'name': 'ChatGLM3-6B-32K',
+                    'mode': ModelMode.CHAT.value,
                 },
+                {
+                    'id': 'chatglm2-6b',
+                    'name': 'ChatGLM2-6B',
+                    'mode': ModelMode.CHAT.value,
+                }
             ]
         else:
             return []

     def _get_text_generation_model_mode(self, model_name) -> str:
-        return ModelMode.COMPLETION.value
+        return ModelMode.CHAT.value

     def get_model_class(self, model_type: ModelType) -> Type[BaseProviderModel]:
         """
@@ -64,16 +70,19 @@ class ChatGLMProvider(BaseModelProvider):
         :return:
         """
         model_max_tokens = {
-            'chatglm-6b': 2000,
-            'chatglm2-6b': 32000,
+            'chatglm3-6b-32k': 32000,
+            'chatglm3-6b': 8000,
+            'chatglm2-6b': 8000,
         }

+        max_tokens_alias = 'max_length' if model_name == 'chatglm2-6b' else 'max_tokens'
+
         return ModelKwargsRules(
             temperature=KwargRule[float](min=0, max=2, default=1, precision=2),
             top_p=KwargRule[float](min=0, max=1, default=0.7, precision=2),
             presence_penalty=KwargRule[float](enabled=False),
             frequency_penalty=KwargRule[float](enabled=False),
-            max_tokens=KwargRule[int](alias='max_token', min=10, max=model_max_tokens.get(model_name), default=2048, precision=0),
+            max_tokens=KwargRule[int](alias=max_tokens_alias, min=10, max=model_max_tokens.get(model_name), default=2048, precision=0),
         )

     @classmethod
@@ -85,16 +94,10 @@ class ChatGLMProvider(BaseModelProvider):
             raise CredentialsValidateFailedError('ChatGLM Endpoint URL must be provided.')

         try:
-            credential_kwargs = {
-                'endpoint_url': credentials['api_base']
-            }
+            response = requests.get(f"{credentials['api_base']}/v1/models", timeout=5)

-            llm = ChatGLM(
-                max_token=10,
-                **credential_kwargs
-            )
-
-            llm("ping")
+            if response.status_code != 200:
+                raise Exception('ChatGLM Endpoint URL is invalid.')
         except Exception as ex:
             raise CredentialsValidateFailedError(str(ex))
```
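Credential validation no longer instantiates a langchain `ChatGLM` and runs a throwaway completion; it simply issues `GET {api_base}/v1/models` with a 5-second timeout and checks for a 200, which matches the new OpenAI-compatible server and avoids model-inference latency during provider setup.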
```diff
@@ -2,11 +2,13 @@ import json
 from typing import Type

 import requests
+from xinference_client.client.restful.restful_client import Client

 from core.helper import encrypter
 from core.model_providers.models.embedding.xinference_embedding import XinferenceEmbedding
 from core.model_providers.models.entity.model_params import KwargRule, ModelKwargsRules, ModelType, ModelMode
 from core.model_providers.models.llm.xinference_model import XinferenceModel
+from core.model_providers.models.reranking.xinference_reranking import XinferenceReranking
 from core.model_providers.providers.base import BaseModelProvider, CredentialsValidateFailedError

 from core.model_providers.models.base import BaseProviderModel
@@ -40,6 +42,8 @@ class XinferenceProvider(BaseModelProvider):
             model_class = XinferenceModel
         elif model_type == ModelType.EMBEDDINGS:
             model_class = XinferenceEmbedding
+        elif model_type == ModelType.RERANKING:
+            model_class = XinferenceReranking
         else:
             raise NotImplementedError

@@ -113,6 +117,10 @@ class XinferenceProvider(BaseModelProvider):
                 )

                 embedding.embed_query("ping")
+            elif model_type == ModelType.RERANKING:
+                rerank_client = Client(credential_kwargs['server_url'])
+                model = rerank_client.get_model(credential_kwargs['model_uid'])
+                model.rerank(query="ping", documents=["ping", "pong"], top_n=2)
         except Exception as ex:
             raise CredentialsValidateFailedError(str(ex))
```
```diff
@@ -23,8 +23,14 @@
         "currency": "USD"
     },
     "claude-2": {
-        "prompt": "11.02",
-        "completion": "32.68",
+        "prompt": "8.00",
+        "completion": "24.00",
         "unit": "0.000001",
         "currency": "USD"
+    },
+    "claude-2.1": {
+        "prompt": "8.00",
+        "completion": "24.00",
+        "unit": "0.000001",
+        "currency": "USD"
     }
```
```diff
@@ -6,6 +6,7 @@
     "model_flexibility": "configurable",
     "supported_model_types": [
         "text-generation",
-        "embeddings"
+        "embeddings",
+        "reranking"
     ]
 }
```
```diff
@@ -207,22 +207,22 @@ class OrchestratorRuleParser:
             ).first()

             if not dataset:
-                return None
+                continue

             if dataset and dataset.available_document_count == 0 and dataset.available_document_count == 0:
-                return None
+                continue
             dataset_ids.append(dataset.id)
             if retrieval_model == 'single':
-                retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
-                top_k = retrieval_model['top_k']
+                retrieval_model_config = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
+                top_k = retrieval_model_config['top_k']

                 # dynamically adjust top_k when the remaining token number is not enough to support top_k
                 # top_k = self._dynamic_calc_retrieve_k(dataset=dataset, top_k=top_k, rest_tokens=rest_tokens)

                 score_threshold = None
-                score_threshold_enable = retrieval_model.get("score_threshold_enable")
+                score_threshold_enable = retrieval_model_config.get("score_threshold_enable")
                 if score_threshold_enable:
-                    score_threshold = retrieval_model.get("score_threshold")
+                    score_threshold = retrieval_model_config.get("score_threshold")

                 tool = DatasetRetrieverTool.from_dataset(
                     dataset=dataset,
```
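Two distinct bugs are fixed here: an unavailable dataset used to `return None` and silently discard every remaining dataset tool, where `continue` now skips only that dataset; and the loop reused the name `retrieval_model` for a per-dataset config dict, clobbering the `'single'` sentinel it is compared against on later iterations, which the `retrieval_model_config` rename avoids. The doubled `available_document_count == 0` check passes through the diff unchanged.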
```diff
@@ -1,7 +1,7 @@
 from typing import Dict

-from httpx import Limits
 from langchain.chat_models import ChatAnthropic
+from langchain.schema import ChatMessage, BaseMessage, HumanMessage, AIMessage, SystemMessage
 from langchain.utils import get_from_dict_or_env, check_package_version
 from pydantic import root_validator

@@ -29,8 +29,7 @@ class AnthropicLLM(ChatAnthropic):
             base_url=values["anthropic_api_url"],
             api_key=values["anthropic_api_key"],
             timeout=values["default_request_timeout"],
-            max_retries=0,
-            connection_pool_limits=Limits(max_connections=200, max_keepalive_connections=100),
+            max_retries=0
         )
         values["async_client"] = anthropic.AsyncAnthropic(
             base_url=values["anthropic_api_url"],
@@ -46,3 +45,16 @@ class AnthropicLLM(ChatAnthropic):
                 "Please it install it with `pip install anthropic`."
             )
         return values
+
+    def _convert_one_message_to_text(self, message: BaseMessage) -> str:
+        if isinstance(message, ChatMessage):
+            message_text = f"\n\n{message.role.capitalize()}: {message.content}"
+        elif isinstance(message, HumanMessage):
+            message_text = f"{self.HUMAN_PROMPT} {message.content}"
+        elif isinstance(message, AIMessage):
+            message_text = f"{self.AI_PROMPT} {message.content}"
+        elif isinstance(message, SystemMessage):
+            message_text = f"{message.content}"
+        else:
+            raise ValueError(f"Got unknown type {message}")
+        return message_text
```
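The restored `_convert_one_message_to_text` reproduces Anthropic's classic completion-style prompt rendering. A sketch of the output it produces, assuming the standard prompt constants exported by the anthropic SDK (the `render` helper is an illustrative stand-in, not the real method):

```python
# HUMAN_PROMPT and AI_PROMPT are "\n\nHuman:" and "\n\nAssistant:" in the anthropic SDK.
from anthropic import HUMAN_PROMPT, AI_PROMPT

def render(messages):
    parts = []
    for role, content in messages:
        if role == "system":
            parts.append(content)          # system text is emitted bare
        elif role == "human":
            parts.append(f"{HUMAN_PROMPT} {content}")
        else:                              # assistant turn
            parts.append(f"{AI_PROMPT} {content}")
    return "".join(parts)

print(render([("system", "Be terse."), ("human", "Hi"), ("ai", "Hello!")]))
# -> "Be terse.\n\nHuman: Hi\n\nAssistant: Hello!"
```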
```diff
@@ -192,7 +192,7 @@ class DatasetMultiRetrieverTool(BaseTool):
                     'search_method'] == 'hybrid_search':
                 embedding_thread = threading.Thread(target=RetrievalService.embedding_search, kwargs={
                     'flask_app': current_app._get_current_object(),
-                    'dataset': dataset,
+                    'dataset_id': str(dataset.id),
                     'query': query,
                     'top_k': self.top_k,
                     'score_threshold': self.score_threshold,
@@ -210,7 +210,7 @@ class DatasetMultiRetrieverTool(BaseTool):
             full_text_index_thread = threading.Thread(target=RetrievalService.full_text_index_search,
                                                       kwargs={
                                                           'flask_app': current_app._get_current_object(),
-                                                          'dataset': dataset,
+                                                          'dataset_id': str(dataset.id),
                                                           'query': query,
                                                           'search_method': 'hybrid_search',
                                                           'embeddings': embeddings,
```
```diff
@@ -106,7 +106,7 @@ class DatasetRetrieverTool(BaseTool):
         if retrieval_model['search_method'] == 'semantic_search' or retrieval_model['search_method'] == 'hybrid_search':
             embedding_thread = threading.Thread(target=RetrievalService.embedding_search, kwargs={
                 'flask_app': current_app._get_current_object(),
-                'dataset': dataset,
+                'dataset_id': str(dataset.id),
                 'query': query,
                 'top_k': self.top_k,
                 'score_threshold': retrieval_model['score_threshold'] if retrieval_model[
@@ -124,7 +124,7 @@ class DatasetRetrieverTool(BaseTool):
         if retrieval_model['search_method'] == 'full_text_search' or retrieval_model['search_method'] == 'hybrid_search':
             full_text_index_thread = threading.Thread(target=RetrievalService.full_text_index_search, kwargs={
                 'flask_app': current_app._get_current_object(),
-                'dataset': dataset,
+                'dataset_id': str(dataset.id),
                 'query': query,
                 'search_method': retrieval_model['search_method'],
                 'embeddings': embeddings,
```
```diff
@@ -60,7 +60,7 @@ def _create_weaviate_client(**kwargs: Any) -> Any:


 def _default_score_normalizer(val: float) -> float:
-    return 1 - 1 / (1 + np.exp(val))
+    return 1 - val


 def _json_serializable(value: Any) -> Any:
@@ -243,7 +243,8 @@ class Weaviate(VectorStore):
             query_obj = query_obj.with_where(kwargs.get("where_filter"))
         if kwargs.get("additional"):
             query_obj = query_obj.with_additional(kwargs.get("additional"))
-        result = query_obj.with_bm25(query=content).with_limit(k).do()
+        properties = ['text', 'dataset_id', 'doc_hash', 'doc_id', 'document_id']
+        result = query_obj.with_bm25(query=query, properties=properties).with_limit(k).do()
         if "errors" in result:
             raise ValueError(f"Error during query: {result['errors']}")
         docs = []
@@ -380,14 +381,14 @@ class Weaviate(VectorStore):
             result = (
                 query_obj.with_near_vector(vector)
                 .with_limit(k)
-                .with_additional("vector")
+                .with_additional(["vector", "distance"])
                 .do()
             )
         else:
             result = (
                 query_obj.with_near_text(content)
                 .with_limit(k)
-                .with_additional("vector")
+                .with_additional(["vector", "distance"])
                 .do()
             )
@@ -397,7 +398,7 @@ class Weaviate(VectorStore):
         docs_and_scores = []
         for res in result["data"]["Get"][self._index_name]:
             text = res.pop(self._text_key)
-            score = np.dot(res["_additional"]["vector"], embedded_query)
+            score = res["_additional"]["distance"]
             docs_and_scores.append((Document(page_content=text, metadata=res), score))
         return docs_and_scores
```
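The scoring pipeline changes from squashing a client-side dot product through a sigmoid to asking Weaviate for `distance` directly and normalizing with `1 - val`. Assuming the index is configured for cosine distance (where distance = 1 − cosine similarity), the new normalizer returns the similarity itself, which makes score_threshold settings meaningful:

```python
import numpy as np

def old_normalizer(val: float) -> float:
    # previous behaviour: sigmoid over np.dot(vector, embedded_query)
    return 1 - 1 / (1 + np.exp(val))

def new_normalizer(distance: float) -> float:
    # with cosine distance d = 1 - cos_sim, this recovers cos_sim directly
    return 1 - distance

print(old_normalizer(0.75))  # ~0.68: sigmoid-compressed, hard to threshold
print(new_normalizer(0.25))  # 0.75: the cosine similarity itself
```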
```diff
@@ -1,4 +1,4 @@
-from langchain.vectorstores import Weaviate
+from core.vector_store.vector.weaviate import Weaviate


 class WeaviateVectorStore(Weaviate):
```
```diff
@@ -35,7 +35,7 @@ docx2txt==0.8
 pypdfium2==4.16.0
 resend~=0.5.1
 pyjwt~=2.6.0
-anthropic~=0.3.4
+anthropic~=0.7.2
 newspaper3k==0.2.8
 google-api-python-client==2.90.0
 wikipedia==1.4.0
@@ -48,7 +48,7 @@ huggingface_hub~=0.16.4
 transformers~=4.31.0
 stripe~=5.5.0
 pandas==1.5.3
-xinference-client~=0.5.4
+xinference-client~=0.6.4
 safetensors==0.3.2
 zhipuai==1.0.7
 werkzeug==2.3.7
```
```diff
@@ -232,7 +232,7 @@ class CompletionService:
             logging.exception("Unknown Error in completion")
             PubHandler.pub_error(user, generate_task_id, e)
         finally:
-            db.session.commit()
+            db.session.remove()

     @classmethod
     def countdown_and_close(cls, flask_app: Flask, worker_thread, pubsub, detached_user,
@@ -242,22 +242,25 @@ class CompletionService:

         def close_pubsub():
             with flask_app.app_context():
-                user = db.session.merge(detached_user)
+                try:
+                    user = db.session.merge(detached_user)

-                sleep_iterations = 0
-                while sleep_iterations < timeout and worker_thread.is_alive():
-                    if sleep_iterations > 0 and sleep_iterations % 10 == 0:
-                        PubHandler.ping(user, generate_task_id)
+                    sleep_iterations = 0
+                    while sleep_iterations < timeout and worker_thread.is_alive():
+                        if sleep_iterations > 0 and sleep_iterations % 10 == 0:
+                            PubHandler.ping(user, generate_task_id)

-                    time.sleep(1)
-                    sleep_iterations += 1
+                        time.sleep(1)
+                        sleep_iterations += 1

-                if worker_thread.is_alive():
-                    PubHandler.stop(user, generate_task_id)
-                    try:
-                        pubsub.close()
-                    except Exception:
-                        pass
+                    if worker_thread.is_alive():
+                        PubHandler.stop(user, generate_task_id)
+                        try:
+                            pubsub.close()
+                        except Exception:
+                            pass
+                finally:
+                    db.session.remove()

         countdown_thread = threading.Thread(target=close_pubsub)
         countdown_thread.start()
@@ -394,7 +397,7 @@ class CompletionService:
             logging.exception(e)
             raise
         finally:
-            db.session.commit()
+            db.session.remove()

         try:
             pubsub.unsubscribe(generate_channel)
@@ -436,7 +439,7 @@ class CompletionService:
             logging.exception(e)
             raise
         finally:
-            db.session.commit()
+            db.session.remove()

         try:
             pubsub.unsubscribe(generate_channel)
```
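All of these paths stop committing in `finally` and call `db.session.remove()` instead: the cleanup code only reads, so there is nothing to commit, and removing the scoped session returns its connection to the pool. `close_pubsub` additionally wraps its whole body in try/finally so the session is released even when pinging or stopping the publisher raises.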
```diff
@@ -61,7 +61,7 @@ class HitTestingService:
         if retrieval_model['search_method'] == 'semantic_search' or retrieval_model['search_method'] == 'hybrid_search':
             embedding_thread = threading.Thread(target=RetrievalService.embedding_search, kwargs={
                 'flask_app': current_app._get_current_object(),
-                'dataset': dataset,
+                'dataset_id': str(dataset.id),
                 'query': query,
                 'top_k': retrieval_model['top_k'],
                 'score_threshold': retrieval_model['score_threshold'] if retrieval_model['score_threshold_enable'] else None,
@@ -77,7 +77,7 @@ class HitTestingService:
         if retrieval_model['search_method'] == 'full_text_search' or retrieval_model['search_method'] == 'hybrid_search':
             full_text_index_thread = threading.Thread(target=RetrievalService.full_text_index_search, kwargs={
                 'flask_app': current_app._get_current_object(),
-                'dataset': dataset,
+                'dataset_id': str(dataset.id),
                 'query': query,
                 'search_method': retrieval_model['search_method'],
                 'embeddings': embeddings,
```
```diff
@@ -4,6 +4,7 @@ from flask import current_app, Flask
 from langchain.embeddings.base import Embeddings

 from core.index.vector_index.vector_index import VectorIndex
 from core.model_providers.model_factory import ModelFactory
+from extensions.ext_database import db
 from models.dataset import Dataset

 default_retrieval_model = {
@@ -21,10 +22,13 @@ default_retrieval_model = {
 class RetrievalService:

     @classmethod
-    def embedding_search(cls, flask_app: Flask, dataset: Dataset, query: str,
+    def embedding_search(cls, flask_app: Flask, dataset_id: str, query: str,
                          top_k: int, score_threshold: Optional[float], reranking_model: Optional[dict],
                          all_documents: list, search_method: str, embeddings: Embeddings):
         with flask_app.app_context():
+            dataset = db.session.query(Dataset).filter(
+                Dataset.id == dataset_id
+            ).first()
+
             vector_index = VectorIndex(
                 dataset=dataset,
@@ -56,10 +60,13 @@ class RetrievalService:
         all_documents.extend(documents)

     @classmethod
-    def full_text_index_search(cls, flask_app: Flask, dataset: Dataset, query: str,
+    def full_text_index_search(cls, flask_app: Flask, dataset_id: str, query: str,
                                top_k: int, score_threshold: Optional[float], reranking_model: Optional[dict],
                                all_documents: list, search_method: str, embeddings: Embeddings):
         with flask_app.app_context():
+            dataset = db.session.query(Dataset).filter(
+                Dataset.id == dataset_id
+            ).first()
+
             vector_index = VectorIndex(
                 dataset=dataset,
```
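`embedding_search` and `full_text_index_search` now take a `dataset_id` string and re-query the `Dataset` inside their own `flask_app.app_context()`, instead of receiving an ORM instance. This pairs with the retriever-tool and hit-testing hunks above: those callers spawn these functions as threads, and a `Dataset` object stays bound to the session of the thread that loaded it, so passing the primitive ID and reloading per thread is the safe pattern.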
```diff
@@ -50,4 +50,7 @@ XINFERENCE_MODEL_UID=
 OPENLLM_SERVER_URL=

 # LocalAI Credentials
-LOCALAI_SERVER_URL=
\ No newline at end of file
+LOCALAI_SERVER_URL=
+
+# Cohere Credentials
+COHERE_API_KEY=
```
```diff
@@ -0,0 +1,61 @@
+import json
+import os
+from unittest.mock import patch
+
+from langchain.schema import Document
+
+from core.model_providers.models.reranking.cohere_reranking import CohereReranking
+from core.model_providers.providers.cohere_provider import CohereProvider
+from models.provider import Provider, ProviderType
+
+
+def get_mock_provider(valid_api_key):
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='cohere',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps({'api_key': valid_api_key}),
+        is_valid=True,
+    )
+
+
+def get_mock_model():
+    valid_api_key = os.environ['COHERE_API_KEY']
+    provider = CohereProvider(provider=get_mock_provider(valid_api_key))
+    return CohereReranking(
+        model_provider=provider,
+        name='rerank-english-v2.0'
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_api_key):
+    return encrypted_api_key
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_run(mock_decrypt):
+    model = get_mock_model()
+
+    docs = []
+    docs.append(Document(
+        page_content='bye',
+        metadata={
+            "doc_id": 'a',
+            "doc_hash": 'doc_hash',
+            "document_id": 'document_id',
+            "dataset_id": 'dataset_id',
+        }
+    ))
+    docs.append(Document(
+        page_content='hello',
+        metadata={
+            "doc_id": 'b',
+            "doc_hash": 'doc_hash',
+            "document_id": 'document_id',
+            "dataset_id": 'dataset_id',
+        }
+    ))
+    rst = model.rerank('hello', docs, None, 2)
+
+    assert rst[0].page_content == 'hello'
```
```diff
@@ -0,0 +1,78 @@
+import json
+import os
+from unittest.mock import patch, MagicMock
+
+from langchain.schema import Document
+
+from core.model_providers.models.entity.model_params import ModelType
+from core.model_providers.models.reranking.xinference_reranking import XinferenceReranking
+from core.model_providers.providers.xinference_provider import XinferenceProvider
+from models.provider import Provider, ProviderType, ProviderModel
+
+
+def get_mock_provider(valid_server_url, valid_model_uid):
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='xinference',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps({'server_url': valid_server_url, 'model_uid': valid_model_uid}),
+        is_valid=True,
+    )
+
+
+def get_mock_model(mocker):
+    valid_server_url = os.environ['XINFERENCE_SERVER_URL']
+    valid_model_uid = os.environ['XINFERENCE_MODEL_UID']
+    model_name = 'bge-reranker-base'
+    provider = XinferenceProvider(provider=get_mock_provider(valid_server_url, valid_model_uid))
+
+    mock_query = MagicMock()
+    mock_query.filter.return_value.first.return_value = ProviderModel(
+        provider_name='xinference',
+        model_name=model_name,
+        model_type=ModelType.RERANKING.value,
+        encrypted_config=json.dumps({
+            'server_url': valid_server_url,
+            'model_uid': valid_model_uid
+        }),
+        is_valid=True,
+    )
+    mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
+
+    return XinferenceReranking(
+        model_provider=provider,
+        name=model_name
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_api_key):
+    return encrypted_api_key
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_run(mock_decrypt, mocker):
+    model = get_mock_model(mocker)
+
+    docs = []
+    docs.append(Document(
+        page_content='bye',
+        metadata={
+            "doc_id": 'a',
+            "doc_hash": 'doc_hash',
+            "document_id": 'document_id',
+            "dataset_id": 'dataset_id',
+        }
+    ))
+    docs.append(Document(
+        page_content='hello',
+        metadata={
+            "doc_id": 'b',
+            "doc_hash": 'doc_hash',
+            "document_id": 'document_id',
+            "dataset_id": 'dataset_id',
+        }
+    ))
+    rst = model.rerank('hello', docs, None, 2)
+
+    assert rst[0].page_content == 'hello'
```
```diff
@@ -31,12 +31,12 @@ def mock_chat_generate_invalid(messages: List[BaseMessage],
                                run_manager: Optional[CallbackManagerForLLMRun] = None,
                                **kwargs: Any):
     raise anthropic.APIStatusError('Invalid credentials',
-                                   request=httpx._models.Request(
-                                       method='POST',
-                                       url='https://api.anthropic.com/v1/completions',
-                                   ),
                                    response=httpx._models.Response(
                                        status_code=401,
                                        request=httpx._models.Request(
                                            method='POST',
                                            url='https://api.anthropic.com/v1/completions',
                                        )
                                    ),
+                                   body=None
                                    )
```
```diff
@@ -2,7 +2,9 @@ import pytest
 from unittest.mock import patch
 import json

+import requests
 from langchain.schema import LLMResult, Generation, AIMessage, ChatResult, ChatGeneration
+from requests import Response

 from core.model_providers.providers.base import CredentialsValidateFailedError
 from core.model_providers.providers.chatglm_provider import ChatGLMProvider
@@ -26,8 +28,11 @@ def decrypt_side_effect(tenant_id, encrypted_key):


 def test_is_provider_credentials_valid_or_raise_valid(mocker):
-    mocker.patch('langchain.llms.chatglm.ChatGLM._call',
-                 return_value="abc")
+    mock_response = Response()
+    mock_response.status_code = 200
+    mock_response._content = json.dumps({'models': []}).encode('utf-8')
+    mocker.patch('requests.get',
+                 return_value=mock_response)

     MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise(VALIDATE_CREDENTIAL)
```
```diff
@@ -30,7 +30,7 @@ services:

   # The Weaviate vector store.
   weaviate:
-    image: semitechnologies/weaviate:1.18.4
+    image: semitechnologies/weaviate:1.19.0
     restart: always
     volumes:
       # Mount the Weaviate data directory to the container.
@@ -63,4 +63,4 @@ services:
 #    environment:
 #      QDRANT__API_KEY: 'difyai123456'
 #    ports:
-#      - "6333:6333"
\ No newline at end of file
+#      - "6333:6333"
```
```diff
@@ -2,7 +2,7 @@ version: '3.1'
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.3.31
+    image: langgenius/dify-api:0.3.32
     restart: always
     environment:
       # Startup mode, 'api' starts the API server.
@@ -128,7 +128,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.3.31
+    image: langgenius/dify-api:0.3.32
     restart: always
     environment:
       # Startup mode, 'worker' starts the Celery worker for processing the queue.
@@ -196,7 +196,7 @@ services:

   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.3.31
+    image: langgenius/dify-web:0.3.32
     restart: always
     environment:
       EDITION: SELF_HOSTED
@@ -253,7 +253,7 @@ services:

   # The Weaviate vector store.
   weaviate:
-    image: semitechnologies/weaviate:1.18.4
+    image: semitechnologies/weaviate:1.19.0
     restart: always
     volumes:
       # Mount the Weaviate data directory to the container.
@@ -280,7 +280,7 @@ services:
   # (if uncommented, you need to comment out the weaviate service above,
   # and set VECTOR_STORE to qdrant in the api & worker service.)
   # qdrant:
   #   image: qdrant/qdrant:latest
+  #   image: langgenius/qdrant:latest
   #   restart: always
   #   volumes:
   #     - ./volumes/qdrant:/qdrant/storage
@@ -302,4 +302,4 @@ services:
       - api
       - web
     ports:
-      - "80:80"
\ No newline at end of file
+      - "80:80"
```
```diff
@@ -194,7 +194,7 @@ const Answer: FC<IAnswerProps> = ({
           </div>
         )
       }
-      <div className={cn(s.answerWrapWrap, 'chat-answer-container')}>
+      <div className={cn(s.answerWrapWrap, 'chat-answer-container group')}>
         <div className={`${s.answerWrap} ${showEdit ? 'w-full' : ''}`}>
           <div className={`${s.answer} relative text-sm text-gray-900`}>
             <div className={'ml-2 py-3 px-4 bg-gray-100 rounded-tr-2xl rounded-b-2xl'}>
@@ -280,7 +280,7 @@ const Answer: FC<IAnswerProps> = ({
               {!feedbackDisabled && renderFeedbackRating(feedback?.rating, !isHideFeedbackEdit, displayScene !== 'console')}
             </div>
           </div>
-          {more && <MoreInfo more={more} isQuestion={false} />}
+          {more && <MoreInfo className='hidden group-hover:block' more={more} isQuestion={false} />}
         </div>
       </div>
     </div>
```
```diff
@@ -5,15 +5,19 @@ import { useTranslation } from 'react-i18next'
 import type { MessageMore } from '../type'
 import { formatNumber } from '@/utils/format'

-export type IMoreInfoProps = { more: MessageMore; isQuestion: boolean }
+export type IMoreInfoProps = {
+  more: MessageMore
+  isQuestion: boolean
+  className?: string
+}

-const MoreInfo: FC<IMoreInfoProps> = ({ more, isQuestion }) => {
+const MoreInfo: FC<IMoreInfoProps> = ({ more, isQuestion, className }) => {
   const { t } = useTranslation()
-  return (<div className={`mt-1 w-full text-xs text-gray-400 !text-right ${isQuestion ? 'mr-2 text-right ' : 'ml-2 text-left float-right'}`}>
-    <span>{`${t('appLog.detail.timeConsuming')} ${more.latency}${t('appLog.detail.second')}`}</span>
-    <span>{`${t('appLog.detail.tokenCost')} ${formatNumber(more.tokens)}`}</span>
-    <span>· </span>
-    <span>{more.time} </span>
+  return (<div className={`mt-1 w-full text-xs text-gray-400 ${isQuestion ? 'mr-2 text-right ' : 'pl-2 text-left float-right'} ${className}`}>
+    <span className='mr-2'>{`${t('appLog.detail.timeConsuming')} ${more.latency}${t('appLog.detail.second')}`}</span>
+    <span className='mr-2'>{`${t('appLog.detail.tokenCost')} ${formatNumber(more.tokens)}`}</span>
+    <span className='mr-2'>·</span>
+    <span>{more.time}</span>
   </div>)
 }
 export default React.memo(MoreInfo)
```
```diff
@@ -1,7 +1,6 @@
 'use client'
 import type { FC } from 'react'
 import React, { useRef, useState } from 'react'
-import { useClickAway } from 'ahooks'
 import { useTranslation } from 'react-i18next'
 import Toast from '../../base/toast'
 import { XClose } from '@/app/components/base/icons/src/vender/line/general'
@@ -31,10 +30,10 @@ const ModifyRetrievalModal: FC<Props> = ({
   const { t } = useTranslation()
   const [retrievalConfig, setRetrievalConfig] = useState(value)

-  useClickAway(() => {
-    if (ref)
-      onHide()
-  }, ref)
+  // useClickAway(() => {
+  //   if (ref)
+  //     onHide()
+  // }, ref)

   const {
     rerankDefaultModel,
```
```diff
@@ -688,6 +688,7 @@ const Main: FC<IMainProps> = () => {
           onUnpin={handleUnpin}
           controlUpdateList={controlUpdateConversationList}
           onDelete={handleDelete}
+          onStartChat={() => handleConversationIdChange('-1')}
         />
       )
     }
```
```diff
@@ -80,6 +80,13 @@ const config: ProviderConfig = {
           'zh-Hans': 'Embeddings',
         },
       },
+      {
+        key: 'reranking',
+        label: {
+          'en': 'Rerank',
+          'zh-Hans': 'Rerank',
+        },
+      },
     ],
   },
   {
```
```diff
@@ -150,10 +150,11 @@ const Form: FC<FormProps> = ({

     if (field.type === 'radio') {
       const options = typeof field.options === 'function' ? field.options(value) : field.options
+
       return (
         <div key={field.key} className='py-3'>
           <div className={nameClassName}>{field.label[locale]}</div>
-          <div className='grid grid-cols-2 gap-3'>
+          <div className={`grid grid-cols-${options?.length} gap-3`}>
             {
               options?.map(option => (
                 <div
```
```diff
@@ -668,7 +668,7 @@ const Main: FC<IMainProps> = ({
           onUnpin={handleUnpin}
           controlUpdateList={controlUpdateConversationList}
           onDelete={handleDelete}
-          onStartChat={handleStartChat}
+          onStartChat={() => handleConversationIdChange('-1')}
         />
       )
     }
```
web/global.d.ts (vendored, 4 changes)

```diff
@@ -1,5 +1,3 @@
 declare module 'lamejs';
 declare module 'react-18-input-autosize';
-declare module 'fetch-readablestream' {
-  export default function fetchReadableStream(url: string, options?: RequestInit): Promise<Response>
-}
```
```diff
@@ -1,6 +1,6 @@
 {
   "name": "dify-web",
-  "version": "0.3.31",
+  "version": "0.3.32",
   "private": true,
   "scripts": {
     "dev": "next dev",
@@ -36,7 +36,6 @@
     "echarts": "^5.4.1",
     "echarts-for-react": "^3.0.2",
     "emoji-mart": "^5.5.2",
-    "fetch-readablestream": "^0.2.0",
     "i18next": "^22.4.13",
     "i18next-resources-to-backend": "^1.1.3",
     "immer": "^9.0.19",
```
```diff
@@ -1,11 +1,8 @@
-import fetchStream from 'fetch-readablestream'
 import { API_PREFIX, IS_CE_EDITION, PUBLIC_API_PREFIX } from '@/config'
 import Toast from '@/app/components/base/toast'
 import type { MessageEnd, MessageReplace, ThoughtItem } from '@/app/components/app/chat/type'
-import { isSupportNativeFetchStream } from '@/utils/stream'

 const TIME_OUT = 100000
-const supportNativeFetchStream = isSupportNativeFetchStream()

 const ContentType = {
   json: 'application/json',
@@ -223,9 +220,6 @@ const baseFetch = <T>(
   if (body && bodyStringify)
     options.body = JSON.stringify(body)

-  // for those do not support native fetch stream, we use fetch-readablestream as polyfill
-  const doFetch = supportNativeFetchStream ? globalThis.fetch : fetchStream
-
   // Handle timeout
   return Promise.race([
     new Promise((resolve, reject) => {
@@ -234,7 +228,7 @@ const baseFetch = <T>(
     }, TIME_OUT)
   }),
   new Promise((resolve, reject) => {
-    doFetch(urlWithPrefix, options as RequestInit)
+    globalThis.fetch(urlWithPrefix, options as RequestInit)
       .then((res) => {
         const resClone = res.clone()
         // Error handler
```
```diff
@@ -1,21 +0,0 @@
-// https://developer.chrome.com/articles/fetch-streaming-requests/#feature-detection
-export const isSupportNativeFetchStream = () => {
-  const supportsRequestStreams = (() => {
-    let duplexAccessed = false
-
-    const params = {
-      body: new ReadableStream(),
-      method: 'POST',
-      get duplex() {
-        duplexAccessed = true
-        return 'half'
-      },
-    }
-
-    const hasContentType = new Request('', params).headers.has('Content-Type')
-
-    return duplexAccessed && !hasContentType
-  })()
-
-  return supportsRequestStreams
-}
```
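The `fetch-readablestream` polyfill, its package.json entry, its module declaration in global.d.ts, and this `isSupportNativeFetchStream` probe (taken from Chrome's fetch-streaming-requests article) are all removed together; `baseFetch` now calls `globalThis.fetch` directly, on the assumption that every supported browser can stream the response body natively.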