chore: merge main

This commit is contained in:
Joel
2024-03-18 10:34:57 +08:00
28 changed files with 444 additions and 107 deletions

View File

@@ -1,17 +1,32 @@
name: Build and Push API Image
name: Build and Push API & Web
on:
push:
branches:
- 'main'
- 'deploy/dev'
- "main"
- "deploy/dev"
release:
types: [ published ]
types: [published]
env:
DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
DIFY_WEB_IMAGE_NAME: ${{ vars.DIFY_WEB_IMAGE_NAME || 'langgenius/dify-web' }}
DIFY_API_IMAGE_NAME: ${{ vars.DIFY_API_IMAGE_NAME || 'langgenius/dify-api' }}
jobs:
build-and-push:
runs-on: ubuntu-latest
if: github.event.pull_request.draft == false
strategy:
matrix:
include:
- service_name: "web"
image_name_env: "DIFY_WEB_IMAGE_NAME"
context: "web"
- service_name: "api"
image_name_env: "DIFY_API_IMAGE_NAME"
context: "api"
steps:
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
@@ -22,14 +37,14 @@ jobs:
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USER }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
username: ${{ env.DOCKERHUB_USER }}
password: ${{ env.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: langgenius/dify-api
images: ${{ env[matrix.image_name_env] }}
tags: |
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }}
type=ref,event=branch
@@ -39,22 +54,11 @@ jobs:
- name: Build and push
uses: docker/build-push-action@v5
with:
context: "{{defaultContext}}:api"
context: "{{defaultContext}}:${{ matrix.context }}"
platforms: ${{ startsWith(github.ref, 'refs/tags/') && 'linux/amd64,linux/arm64' || 'linux/amd64' }}
build-args: |
COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
build-args: COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Deploy to server
if: github.ref == 'refs/heads/deploy/dev'
uses: appleboy/ssh-action@v0.1.8
with:
host: ${{ secrets.SSH_HOST }}
username: ${{ secrets.SSH_USER }}
key: ${{ secrets.SSH_PRIVATE_KEY }}
script: |
${{ secrets.SSH_SCRIPT }}

View File

@@ -1,60 +0,0 @@
name: Build and Push WEB Image
on:
push:
branches:
- 'main'
- 'deploy/dev'
release:
types: [ published ]
jobs:
build-and-push:
runs-on: ubuntu-latest
if: github.event.pull_request.draft == false
steps:
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USER }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: langgenius/dify-web
tags: |
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }}
type=ref,event=branch
type=sha,enable=true,priority=100,prefix=,suffix=,format=long
type=raw,value=${{ github.ref_name }},enable=${{ startsWith(github.ref, 'refs/tags/') }}
- name: Build and push
uses: docker/build-push-action@v5
with:
context: "{{defaultContext}}:web"
platforms: ${{ startsWith(github.ref, 'refs/tags/') && 'linux/amd64,linux/arm64' || 'linux/amd64' }}
build-args: |
COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Deploy to server
if: github.ref == 'refs/heads/deploy/dev'
uses: appleboy/ssh-action@v0.1.8
with:
host: ${{ secrets.SSH_HOST }}
username: ${{ secrets.SSH_USER }}
key: ${{ secrets.SSH_PRIVATE_KEY }}
script: |
${{ secrets.SSH_SCRIPT }}

24
.github/workflows/deploy-dev.yml vendored Normal file
View File

@@ -0,0 +1,24 @@
# Deploys the dev environment once the image-build workflow has finished.
name: Deploy Dev
on:
  # Fires when the "Build and Push API & Web" workflow completes
  # for the deploy/dev branch.
  workflow_run:
    workflows: ["Build and Push API & Web"]
    branches:
      - "deploy/dev"
    types:
      - completed
jobs:
  deploy:
    runs-on: ubuntu-latest
    # Only deploy when the triggering build workflow succeeded.
    if: |
      github.event.workflow_run.conclusion == 'success'
    steps:
      - name: Deploy to server
        uses: appleboy/ssh-action@v0.1.8
        with:
          host: ${{ secrets.SSH_HOST }}
          username: ${{ secrets.SSH_USER }}
          key: ${{ secrets.SSH_PRIVATE_KEY }}
          # Prefer the repository variable; fall back to the secret.
          script: |
            ${{ vars.SSH_SCRIPT || secrets.SSH_SCRIPT }}

43
Makefile Normal file
View File

@@ -0,0 +1,43 @@
# ---- Variables -------------------------------------------------------------
DOCKER_REGISTRY=langgenius
WEB_IMAGE=$(DOCKER_REGISTRY)/dify-web
API_IMAGE=$(DOCKER_REGISTRY)/dify-api
VERSION=latest

# ---- Build Docker images ---------------------------------------------------
build-web:
	@echo "Building web Docker image: $(WEB_IMAGE):$(VERSION)..."
	docker build -t $(WEB_IMAGE):$(VERSION) ./web
	@echo "Web Docker image built successfully: $(WEB_IMAGE):$(VERSION)"

build-api:
	@echo "Building API Docker image: $(API_IMAGE):$(VERSION)..."
	docker build -t $(API_IMAGE):$(VERSION) ./api
	@echo "API Docker image built successfully: $(API_IMAGE):$(VERSION)"

# ---- Push Docker images ----------------------------------------------------
push-web:
	@echo "Pushing web Docker image: $(WEB_IMAGE):$(VERSION)..."
	docker push $(WEB_IMAGE):$(VERSION)
	@echo "Web Docker image pushed successfully: $(WEB_IMAGE):$(VERSION)"

push-api:
	@echo "Pushing API Docker image: $(API_IMAGE):$(VERSION)..."
	docker push $(API_IMAGE):$(VERSION)
	@echo "API Docker image pushed successfully: $(API_IMAGE):$(VERSION)"

# ---- Aggregate targets -----------------------------------------------------
# Build all images
build-all: build-web build-api
# Push all images
push-all: push-web push-api
build-push-api: build-api push-api
build-push-web: build-web push-web
# Build and push all images
build-push-all: build-all push-all
	@echo "All Docker images have been built and pushed."

# Phony targets
# FIX: build-push-api / build-push-web were missing from .PHONY, so a file
# with either name would silently shadow the target.
.PHONY: build-web build-api push-web push-api build-all push-all \
	build-push-api build-push-web build-push-all

View File

@@ -52,6 +52,9 @@ class ModelConfigResource(Resource):
masked_parameter_map = {}
tool_map = {}
for tool in agent_mode.get('tools') or []:
if not isinstance(tool, dict) or len(tool.keys()) <= 3:
continue
agent_tool_entity = AgentToolEntity(**tool)
# get tool
try:

View File

@@ -35,7 +35,7 @@ from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotIni
from core.file.file_obj import FileObj
from core.model_runtime.entities.message_entities import PromptMessageRole
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError
from core.model_runtime.errors.invoke import InvokeAuthorizationError
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.prompt.prompt_template import PromptTemplateParser
from core.provider_manager import ProviderManager
@@ -195,8 +195,6 @@ class ApplicationManager:
except ValidationError as e:
logger.exception("Validation Error when generating")
queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
except (ValueError, InvokeError) as e:
queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
except Exception as e:
logger.exception("Unknown Error when generating")
queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)

View File

@@ -20,6 +20,7 @@
- jina
- chatglm
- xinference
- yi
- openllm
- localai
- openai_api_compatible

View File

@@ -17,10 +17,9 @@ class BedrockProvider(ModelProvider):
"""
try:
model_instance = self.get_model_instance(ModelType.LLM)
# Use a configurable model (default `amazon.titan-text-lite-v1`) for validation,
bedrock_validate_model_name = credentials.get('model_for_validation', 'amazon.titan-text-lite-v1')
model_instance.validate_credentials(
model='amazon.titan-text-lite-v1',
model=bedrock_validate_model_name,
credentials=credentials
)
except CredentialsValidateFailedError as ex:

View File

@@ -69,3 +69,12 @@ provider_credential_schema:
label:
en_US: AWS GovCloud (US-West)
zh_Hans: AWS GovCloud (US-West)
- variable: model_for_validation
required: false
label:
en_US: Available Model Name
zh_Hans: 可用模型名称
type: text-input
placeholder:
en_US: A model you have access to (e.g. amazon.titan-text-lite-v1) for validation.
zh_Hans: 为了进行验证,请输入一个您可用的模型名称 (例如 amazon.titan-text-lite-v1)

View File

@@ -1,6 +1,5 @@
from collections.abc import Generator
from typing import cast
from urllib.parse import urljoin
from httpx import Timeout
from openai import (
@@ -19,6 +18,7 @@ from openai import (
from openai.types.chat import ChatCompletion, ChatCompletionChunk
from openai.types.chat.chat_completion_message import FunctionCall
from openai.types.completion import Completion
from yarl import URL
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta
@@ -181,7 +181,7 @@ class LocalAILarguageModel(LargeLanguageModel):
UserPromptMessage(content='ping')
], model_parameters={
'max_tokens': 10,
}, stop=[])
}, stop=[], stream=False)
except Exception as ex:
raise CredentialsValidateFailedError(f'Invalid credentials {str(ex)}')
@@ -227,6 +227,12 @@ class LocalAILarguageModel(LargeLanguageModel):
)
]
model_properties = {
ModelPropertyKey.MODE: completion_model,
} if completion_model else {}
model_properties[ModelPropertyKey.CONTEXT_SIZE] = int(credentials.get('context_size', '2048'))
entity = AIModelEntity(
model=model,
label=I18nObject(
@@ -234,7 +240,7 @@ class LocalAILarguageModel(LargeLanguageModel):
),
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_type=ModelType.LLM,
model_properties={ ModelPropertyKey.MODE: completion_model } if completion_model else {},
model_properties=model_properties,
parameter_rules=rules
)
@@ -319,7 +325,7 @@ class LocalAILarguageModel(LargeLanguageModel):
client_kwargs = {
"timeout": Timeout(315.0, read=300.0, write=10.0, connect=5.0),
"api_key": "1",
"base_url": urljoin(credentials['server_url'], 'v1'),
"base_url": str(URL(credentials['server_url']) / 'v1'),
}
return client_kwargs

View File

@@ -56,3 +56,12 @@ model_credential_schema:
placeholder:
zh_Hans: 在此输入LocalAI的服务器地址如 http://192.168.1.100:8080
en_US: Enter the url of your LocalAI, e.g. http://192.168.1.100:8080
- variable: context_size
label:
zh_Hans: 上下文大小
en_US: Context size
placeholder:
zh_Hans: 输入上下文大小
en_US: Enter context size
required: false
type: text-input

View File

@@ -1,11 +1,12 @@
import time
from json import JSONDecodeError, dumps
from os.path import join
from typing import Optional
from requests import post
from yarl import URL
from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
from core.model_runtime.errors.invoke import (
InvokeAuthorizationError,
@@ -57,7 +58,7 @@ class LocalAITextEmbeddingModel(TextEmbeddingModel):
}
try:
response = post(join(url, 'embeddings'), headers=headers, data=dumps(data), timeout=10)
response = post(str(URL(url) / 'embeddings'), headers=headers, data=dumps(data), timeout=10)
except Exception as e:
raise InvokeConnectionError(str(e))
@@ -113,6 +114,27 @@ class LocalAITextEmbeddingModel(TextEmbeddingModel):
# use GPT2Tokenizer to get num tokens
num_tokens += self._get_num_tokens_by_gpt2(text)
return num_tokens
    def _get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
        """
        Build the schema for a user-configured (customizable) embedding model.

        :param model: model name as entered by the user
        :param credentials: model credentials; ``context_size`` is read here
            and defaults to 512 when absent
        :return: model schema describing this embedding model
        """
        return AIModelEntity(
            model=model,
            # The display label simply mirrors the model name in both locales.
            label=I18nObject(zh_Hans=model, en_US=model),
            model_type=ModelType.TEXT_EMBEDDING,
            features=[],
            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
            model_properties={
                # context_size may arrive as a string from the credential form,
                # hence the int() conversion.
                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get('context_size', '512')),
                ModelPropertyKey.MAX_CHUNKS: 1,
            },
            parameter_rules=[]
        )
def validate_credentials(self, model: str, credentials: dict) -> None:
"""

View File

@@ -0,0 +1,20 @@
<svg width="80" height="22" viewBox="0 0 450 120" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xml:space="preserve" xmlns:serif="http://www.serif.com/" style="fill-rule:evenodd;clip-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:2;">
<g transform="matrix(0.172742,0,0,0.172742,9.60932,8.17741)">
<circle cx="300" cy="300" r="300" style="fill:rgb(0,52,37);"/>
</g>
<g transform="matrix(0.172742,0,0,0.172742,9.60932,8.17741)">
<path d="M452.119,361.224C452.119,349.527 442.623,340.031 430.926,340.031C419.229,340.031 409.733,349.527 409.733,361.224L409.733,470.486C409.733,482.183 419.229,491.679 430.926,491.679C442.623,491.679 452.119,482.183 452.119,470.486L452.119,361.224Z" style="fill:white;"/>
</g>
<g transform="matrix(0.172742,0,0,0.172742,9.60932,8.17741)">
<path d="M422.005,133.354C413.089,125.771 399.714,126.851 392.131,135.767L273.699,275.021C270.643,278.614 268.994,282.932 268.698,287.302C268.532,288.371 268.446,289.466 268.446,290.581L268.446,468.603C268.446,480.308 277.934,489.796 289.639,489.796C301.344,489.796 310.832,480.308 310.832,468.603L310.832,296.784L424.419,163.228C432.002,154.312 430.921,140.937 422.005,133.354Z" style="fill:white;"/>
</g>
<g transform="matrix(0.13359,-0.109514,0.109514,0.13359,-0.630793,25.9151)">
<path d="M156.358,155.443C156.358,143.746 146.862,134.25 135.165,134.25C123.468,134.25 113.972,143.746 113.972,155.443L113.972,287.802C113.972,299.499 123.468,308.995 135.165,308.995C146.862,308.995 156.358,299.499 156.358,287.802L156.358,155.443Z" style="fill:white;"/>
</g>
<g transform="matrix(0.172742,0,0,0.172742,9.60932,8.17741)">
<circle cx="460.126" cy="279.278" r="25.903" style="fill:rgb(0,255,37);"/>
</g>
<g transform="matrix(1,0,0,1,-77.4848,13.0849)">
<text x="210.275px" y="74.595px" style="font-family:'AlibabaPuHuiTi_3_55_Regular', 'Alibaba PuHuiTi 3.0', serif;font-size:80px;">01<tspan x="294.355px " y="74.595px ">.</tspan>AI</text>
</g>
</svg>

After

Width:  |  Height:  |  Size: 2.0 KiB

View File

@@ -0,0 +1,20 @@
<svg width="80" height="22" viewBox="0 0 450 120" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xml:space="preserve" xmlns:serif="http://www.serif.com/" style="fill-rule:evenodd;clip-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:2;">
<g transform="matrix(0.172742,0,0,0.172742,9.60932,8.17741)">
<circle cx="300" cy="300" r="300" style="fill:rgb(0,52,37);"/>
</g>
<g transform="matrix(0.172742,0,0,0.172742,9.60932,8.17741)">
<path d="M452.119,361.224C452.119,349.527 442.623,340.031 430.926,340.031C419.229,340.031 409.733,349.527 409.733,361.224L409.733,470.486C409.733,482.183 419.229,491.679 430.926,491.679C442.623,491.679 452.119,482.183 452.119,470.486L452.119,361.224Z" style="fill:white;"/>
</g>
<g transform="matrix(0.172742,0,0,0.172742,9.60932,8.17741)">
<path d="M422.005,133.354C413.089,125.771 399.714,126.851 392.131,135.767L273.699,275.021C270.643,278.614 268.994,282.932 268.698,287.302C268.532,288.371 268.446,289.466 268.446,290.581L268.446,468.603C268.446,480.308 277.934,489.796 289.639,489.796C301.344,489.796 310.832,480.308 310.832,468.603L310.832,296.784L424.419,163.228C432.002,154.312 430.921,140.937 422.005,133.354Z" style="fill:white;"/>
</g>
<g transform="matrix(0.13359,-0.109514,0.109514,0.13359,-0.630793,25.9151)">
<path d="M156.358,155.443C156.358,143.746 146.862,134.25 135.165,134.25C123.468,134.25 113.972,143.746 113.972,155.443L113.972,287.802C113.972,299.499 123.468,308.995 135.165,308.995C146.862,308.995 156.358,299.499 156.358,287.802L156.358,155.443Z" style="fill:white;"/>
</g>
<g transform="matrix(0.172742,0,0,0.172742,9.60932,8.17741)">
<circle cx="460.126" cy="279.278" r="25.903" style="fill:rgb(0,255,37);"/>
</g>
<g transform="matrix(1,0,0,1,-77.4848,13.0849)">
<text x="210.275px" y="74.595px" style="font-family:'AlibabaPuHuiTi_3_55_Regular', 'Alibaba PuHuiTi 3.0', serif;font-size:80px;">零一万物</text>
</g>
</svg>

After

Width:  |  Height:  |  Size: 2.0 KiB

View File

@@ -0,0 +1,7 @@
<svg width="24" height="24" viewBox="0 0 600 600" fill="none" xmlns="http://www.w3.org/2000/svg">
<circle cx="300" cy="300" r="300" fill="#003425"/>
<rect x="409.733" y="340.031" width="42.3862" height="151.648" rx="21.1931" fill="white"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M422.005 133.354C413.089 125.771 399.714 126.851 392.131 135.767L273.699 275.021C270.643 278.614 268.994 282.932 268.698 287.302C268.532 288.371 268.446 289.466 268.446 290.581V468.603C268.446 480.308 277.934 489.796 289.639 489.796C301.344 489.796 310.832 480.308 310.832 468.603V296.784L424.419 163.228C432.002 154.312 430.921 140.937 422.005 133.354Z" fill="white"/>
<rect x="113.972" y="134.25" width="42.3862" height="174.745" rx="21.1931" transform="rotate(-39.3441 113.972 134.25)" fill="white"/>
<circle cx="460.126" cy="279.278" r="25.9027" fill="#00FF25"/>
</svg>

After

Width:  |  Height:  |  Size: 882 B

View File

@@ -0,0 +1,3 @@
- yi-34b-chat-0205
- yi-34b-chat-200k
- yi-vl-plus

View File

@@ -0,0 +1,30 @@
from collections.abc import Generator
from typing import Optional, Union
from core.model_runtime.entities.llm_entities import LLMResult
from core.model_runtime.entities.message_entities import (
PromptMessage,
PromptMessageTool,
)
from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel
class YiLargeLanguageModel(OAIAPICompatLargeLanguageModel):
    """01.AI (Yi) LLM, exposed through the OpenAI-compatible API layer."""

    def _invoke(self, model: str, credentials: dict,
                prompt_messages: list[PromptMessage], model_parameters: dict,
                tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
                stream: bool = True, user: Optional[str] = None) \
            -> Union[LLMResult, Generator]:
        """
        Invoke the model after injecting Yi-specific defaults into the credentials.

        :param model: model name
        :param credentials: model credentials (mutated to add mode/endpoint defaults)
        :return: full response or a chunk generator when streaming
        """
        self._add_custom_parameters(credentials)
        # FIX: forward `user` to the parent implementation; it was previously dropped.
        return super()._invoke(model, credentials, prompt_messages, model_parameters,
                               tools, stop, stream, user)

    def validate_credentials(self, model: str, credentials: dict) -> None:
        """Validate credentials via the OpenAI-compatible validator."""
        self._add_custom_parameters(credentials)
        super().validate_credentials(model, credentials)

    @staticmethod
    def _add_custom_parameters(credentials: dict) -> None:
        # Yi models are chat-mode only; default to the official endpoint
        # when none is configured.
        credentials['mode'] = 'chat'
        if 'endpoint_url' not in credentials or credentials['endpoint_url'] == "":
            credentials['endpoint_url'] = 'https://api.lingyiwanwu.com/v1'

View File

@@ -0,0 +1,28 @@
# Yi model definition: yi-34b-chat-0205 (chat mode, 4K context).
model: yi-34b-chat-0205
label:
  zh_Hans: yi-34b-chat-0205
  en_US: yi-34b-chat-0205
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 4096
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    type: int
    default: 512
    min: 1
    # Capped at the model's context window.
    max: 4096
  - name: temperature
    use_template: temperature
    type: float
    default: 0.7
    min: 0
    max: 2
# Token pricing; values are interpreted against `unit` by the pricing engine.
pricing:
  input: '0.0025'
  output: '0.0025'
  unit: '0.00001'
  currency: RMB

View File

@@ -0,0 +1,28 @@
# Yi model definition: yi-34b-chat-200k (chat mode, 200K context).
model: yi-34b-chat-200k
label:
  zh_Hans: yi-34b-chat-200k
  en_US: yi-34b-chat-200k
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 200000
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    type: int
    default: 1024
    min: 1
    # Capped at the model's context window.
    max: 200000
  - name: temperature
    use_template: temperature
    type: float
    default: 0.7
    min: 0
    max: 2
# Token pricing; values are interpreted against `unit` by the pricing engine.
pricing:
  input: '0.012'
  output: '0.012'
  unit: '0.00001'
  currency: RMB

View File

@@ -0,0 +1,28 @@
# Yi model definition: yi-vl-plus (vision-capable chat model, 4K context).
model: yi-vl-plus
label:
  zh_Hans: yi-vl-plus
  en_US: yi-vl-plus
model_type: llm
features:
  - vision
model_properties:
  mode: chat
  context_size: 4096
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    type: int
    default: 512
    min: 1
    # Capped at the model's context window.
    max: 4096
  - name: temperature
    use_template: temperature
    type: float
    default: 0.7
    min: 0
    max: 2
# Token pricing; values are interpreted against `unit` by the pricing engine.
# NOTE(review): this model is priced in USD while the yi-34b variants use RMB —
# confirm against the provider's price list.
pricing:
  input: '0.01'
  output: '0.03'
  unit: '0.001'
  currency: USD

View File

@@ -0,0 +1,32 @@
import logging
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.model_provider import ModelProvider
logger = logging.getLogger(__name__)
class YiProvider(ModelProvider):
    """Provider wrapper for 01.AI (Yi) models."""

    def validate_provider_credentials(self, credentials: dict) -> None:
        """
        Validate provider credentials; raises on failure.

        :param credentials: provider credentials, form defined in
            `provider_credential_schema`
        :raises CredentialsValidateFailedError: when the provider rejects
            the credentials
        """
        try:
            # Any accessible chat model works for validation; use a small
            # fixed one regardless of what the user configures later.
            llm = self.get_model_instance(ModelType.LLM)
            llm.validate_credentials(
                model='yi-34b-chat-0205',
                credentials=credentials
            )
        except CredentialsValidateFailedError:
            raise
        except Exception:
            logger.exception(f'{self.get_provider_schema().provider} credentials validate failed')
            raise

View File

@@ -0,0 +1,41 @@
# Provider manifest for 01.AI (Yi).
provider: yi
label:
  en_US: 01.AI
  zh_Hans: 零一万物
description:
  en_US: Models provided by 01.AI, such as yi-34b-chat and yi-vl-plus.
  zh_Hans: 零一万物提供的模型,例如 yi-34b-chat 和 yi-vl-plus。
icon_small:
  en_US: icon_s_en.svg
icon_large:
  en_US: icon_l_en.svg
background: "#EFFDFD"
help:
  title:
    en_US: Get your API Key from 01.ai
    zh_Hans: 从零一万物获取 API Key
  url:
    en_US: https://platform.lingyiwanwu.com/apikeys
supported_model_types:
  - llm
configurate_methods:
  - predefined-model
provider_credential_schema:
  credential_form_schemas:
    - variable: api_key
      label:
        en_US: API Key
      type: secret-input
      required: true
      placeholder:
        zh_Hans: 在此输入您的 API Key
        en_US: Enter your API Key
    - variable: endpoint_url
      label:
        zh_Hans: 自定义 API endpoint 地址
        # FIX: typo "CUstom" -> "Custom"
        en_US: Custom API endpoint URL
      type: text-input
      required: false
      placeholder:
        zh_Hans: Base URL, e.g. https://api.lingyiwanwu.com/v1
        en_US: Base URL, e.g. https://api.lingyiwanwu.com/v1

View File

@@ -1,4 +1,6 @@
import matplotlib.pyplot as plt
from fontTools.ttLib import TTFont
from matplotlib.font_manager import findSystemFonts
from core.tools.errors import ToolProviderCredentialValidationError
from core.tools.provider.builtin.chart.tools.line import LinearChartTool
@@ -6,6 +8,37 @@ from core.tools.provider.builtin_tool_provider import BuiltinToolProviderControl
# use a business theme
plt.style.use('seaborn-v0_8-darkgrid')
plt.rcParams['axes.unicode_minus'] = False
def init_fonts():
    """
    Select a widely-available Unicode-capable font for matplotlib so chart
    labels render without missing glyphs.

    Scans installed system fonts, keeps those whose family name is in a
    preference-ordered list of popular Unicode fonts, and installs the
    first match as matplotlib's sans-serif font.
    """
    fonts = findSystemFonts()
    # Preference-ordered list of font families known for broad Unicode coverage.
    popular_unicode_fonts = [
        'Arial Unicode MS', 'DejaVu Sans', 'DejaVu Sans Mono', 'DejaVu Serif', 'FreeMono', 'FreeSans', 'FreeSerif',
        'Liberation Mono', 'Liberation Sans', 'Liberation Serif', 'Noto Mono', 'Noto Sans', 'Noto Serif', 'Open Sans',
        'Roboto', 'Source Code Pro', 'Source Sans Pro', 'Source Serif Pro', 'Ubuntu', 'Ubuntu Mono'
    ]

    supported_fonts = []
    for font_path in fonts:
        try:
            font = TTFont(font_path)
            # name ID 1 = font family; platform 3 (Windows), encoding 1 (Unicode BMP)
            family_name = font['name'].getName(1, 3, 1).toUnicode()
            if family_name in popular_unicode_fonts:
                supported_fonts.append(family_name)
        except Exception:
            # FIX: narrowed from a bare `except`, which would also swallow
            # KeyboardInterrupt/SystemExit. Unreadable font files are skipped.
            pass

    plt.rcParams['font.family'] = 'sans-serif'
    # Install the first supported font by our preference order.
    for font in popular_unicode_fonts:
        if font in supported_fonts:
            plt.rcParams['font.sans-serif'] = font
            break

init_fonts()
class ChartProvider(BuiltinToolProviderController):
def _validate_credentials(self, credentials: dict) -> None:

View File

@@ -171,8 +171,8 @@ const ActivateForm = () => {
</label>
<div className="relative mt-1 rounded-md shadow-sm">
<SimpleSelect
defaultValue={defaultLanguage()}
items={languages}
defaultValue={LanguagesSupported[0]}
items={languages.filter(item => item.supported)}
onSelect={(item) => {
setLanguage(item.value as string)
}}

View File

@@ -4,6 +4,7 @@ import type {
} from 'react'
import {
memo,
useCallback,
useEffect,
useRef,
useState,
@@ -82,22 +83,20 @@ const Chat: FC<ChatProps> = ({
const chatContainerInnerRef = useRef<HTMLDivElement>(null)
const chatFooterRef = useRef<HTMLDivElement>(null)
const chatFooterInnerRef = useRef<HTMLDivElement>(null)
const userScrolledRef = useRef(false)
const handleScrolltoBottom = () => {
if (chatContainerRef.current)
const handleScrolltoBottom = useCallback(() => {
if (chatContainerRef.current && !userScrolledRef.current)
chatContainerRef.current.scrollTop = chatContainerRef.current.scrollHeight
}
const handleWindowResize = () => {
if (chatContainerRef.current)
setWidth(document.body.clientWidth - (chatContainerRef.current?.clientWidth + 16) - 8)
}, [])
const handleWindowResize = useCallback(() => {
if (chatContainerRef.current && chatFooterRef.current)
chatFooterRef.current.style.width = `${chatContainerRef.current.clientWidth}px`
if (chatContainerInnerRef.current && chatFooterInnerRef.current)
chatFooterInnerRef.current.style.width = `${chatContainerInnerRef.current.clientWidth}px`
}
}, [])
useThrottleEffect(() => {
handleScrolltoBottom()
@@ -107,7 +106,7 @@ const Chat: FC<ChatProps> = ({
useEffect(() => {
window.addEventListener('resize', debounce(handleWindowResize))
return () => window.removeEventListener('resize', handleWindowResize)
}, [])
}, [handleWindowResize])
useEffect(() => {
if (chatFooterRef.current && chatContainerRef.current) {
@@ -126,7 +125,19 @@ const Chat: FC<ChatProps> = ({
resizeObserver.disconnect()
}
}
}, [chatFooterRef, chatContainerRef])
}, [handleScrolltoBottom])
useEffect(() => {
const chatContainer = chatContainerRef.current
if (chatContainer) {
const setUserScrolled = () => {
if (chatContainer)
userScrolledRef.current = chatContainer.scrollHeight - chatContainer.scrollTop >= chatContainer.clientHeight + 300
}
chatContainer.addEventListener('scroll', setUserScrolled)
return () => chatContainer.removeEventListener('scroll', setUserScrolled)
}
}, [])
const hasTryToAsk = config?.suggested_questions_after_answer?.enabled && !!suggestedQuestions?.length && onSend

View File

@@ -153,8 +153,6 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx'
"user": "abc-123"
}'
```
```
</CodeGroup>
### blocking
<CodeGroup title="Response">