Merge branch 'fix/chore-fix' into dev/plugin-deploy

Novice Lee
2025-01-09 08:48:00 +08:00
231 changed files with 3564 additions and 2382 deletions

View File

@@ -105,6 +105,9 @@ FILES_ACCESS_TIMEOUT=300
# Access token expiration time in minutes
ACCESS_TOKEN_EXPIRE_MINUTES=60
# Refresh token expiration time in days
REFRESH_TOKEN_EXPIRE_DAYS=30
# The maximum number of active requests for the application; 0 means unlimited. Must be a non-negative integer.
APP_MAX_ACTIVE_REQUESTS=0
APP_MAX_EXECUTION_TIME=1200
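For example, to cap concurrent requests rather than leaving them unlimited, set a bound in your .env (copied from .env.example); the value here is purely illustrative:

    APP_MAX_ACTIVE_REQUESTS=100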
@@ -123,10 +126,13 @@ DIFY_PORT=5001
# The number of API server workers, i.e., the number of gunicorn workers.
# Formula: number of CPU cores x 2 + 1 for sync, 1 for gevent
# Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers
SERVER_WORKER_AMOUNT=
SERVER_WORKER_AMOUNT=1
# Defaults to gevent. On Windows, switch it to sync or solo.
SERVER_WORKER_CLASS=
SERVER_WORKER_CLASS=gevent
# The number of worker connections; defaults to 10.
SERVER_WORKER_CONNECTIONS=10
# Similar to SERVER_WORKER_CLASS.
# On Windows, switch it to sync or solo.
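The gunicorn sizing formula referenced above can be sanity-checked with a small shell sketch (assumes nproc is available, as on most Linux hosts):

    CORES=$(nproc)
    echo "sync workers: $((CORES * 2 + 1)); gevent workers: 1 (the new default)"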
@@ -377,7 +383,7 @@ SUPABASE_URL=your-server-url
# ------------------------------
# The type of vector store to use.
# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`, `elasticsearch`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`.
# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`.
VECTOR_STORE=weaviate
# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
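As a hedged example, opting into the newly supported Japanese-optimized backend is a one-value change in your .env (the sed invocation assumes GNU sed):

    sed -i 's/^VECTOR_STORE=.*/VECTOR_STORE=elasticsearch-ja/' .env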
@@ -397,6 +403,7 @@ MILVUS_URI=http://127.0.0.1:19530
MILVUS_TOKEN=
MILVUS_USER=root
MILVUS_PASSWORD=Milvus
MILVUS_ENABLE_HYBRID_SEARCH=False
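MILVUS_ENABLE_HYBRID_SEARCH defaults to off; the flag pairs with the image bump to milvusdb/milvus:v2.5.0-beta later in this diff, since hybrid (dense + sparse) search lands in the Milvus 2.5 line. A quick liveness check mirroring the container's own healthcheck:

    docker exec milvus-standalone curl -f http://localhost:9091/healthz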
# MyScale configuration, only available when VECTOR_STORE is `myscale`
# For multi-language support, please set MYSCALE_FTS_PARAMS with referring to:
@@ -506,7 +513,7 @@ TENCENT_VECTOR_DB_SHARD=1
TENCENT_VECTOR_DB_REPLICAS=2
# ElasticSearch configuration, only available when VECTOR_STORE is `elasticsearch`
ELASTICSEARCH_HOST=0.0.0.0
ELASTICSEARCH_HOST=elasticsearch
ELASTICSEARCH_PORT=9200
ELASTICSEARCH_USERNAME=elastic
ELASTICSEARCH_PASSWORD=elastic
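With ELASTICSEARCH_HOST now pointing at the compose service name rather than 0.0.0.0, reachability can be verified from inside the compose network with the credentials above — a sketch, run from the api container:

    docker compose exec api curl -s -u elastic:elastic http://elasticsearch:9200/_cluster/health?pretty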
@@ -923,6 +930,9 @@ CREATE_TIDB_SERVICE_JOB_ENABLED=false
# Maximum number of threads that can be submitted to the ThreadPool for parallel node execution
MAX_SUBMIT_COUNT=100
# The maximum top-k value for RAG retrieval.
TOP_K_MAX_VALUE=10
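To raise the new top-k ceiling, override the default before bringing the stack up, e.g.:

    echo 'TOP_K_MAX_VALUE=20' >> .env
    docker compose up -d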
# ------------------------------
# Plugin Daemon Configuration
# ------------------------------
@@ -947,3 +957,4 @@ ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id}
MARKETPLACE_ENABLED=true
MARKETPLACE_API_URL=https://marketplace-plugin.dify.dev

View File

@@ -73,6 +73,7 @@ services:
CSP_WHITELIST: ${CSP_WHITELIST:-}
MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace-plugin.dify.dev}
MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace-plugin.dify.dev}
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
# The postgres database.
db:
@@ -92,7 +93,7 @@ services:
volumes:
- ./volumes/db/data:/var/lib/postgresql/data
healthcheck:
test: ['CMD', 'pg_isready']
test: [ 'CMD', 'pg_isready' ]
interval: 1s
timeout: 3s
retries: 30
@@ -111,7 +112,7 @@ services:
# Set the Redis password when starting the Redis server.
command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
healthcheck:
test: ['CMD', 'redis-cli', 'ping']
test: [ 'CMD', 'redis-cli', 'ping' ]
# The DifySandbox
sandbox:
@@ -131,7 +132,7 @@ services:
volumes:
- ./volumes/sandbox/dependencies:/dependencies
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:8194/health']
test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ]
networks:
- ssrf_proxy_network
@@ -167,12 +168,7 @@ services:
volumes:
- ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
- ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
entrypoint:
[
'sh',
'-c',
"cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
]
entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
environment:
# Please adjust the Squid environment variables below to fit your network environment.
HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
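Note the `$$` in the collapsed entrypoint above: compose interpolates `$`, so `$$` escapes to a literal `$` and the container actually runs sed -i 's/\r$//', stripping Windows CR line endings before handing off to the real entrypoint. The equivalent outside compose:

    sed -i 's/\r$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh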
@@ -201,8 +197,8 @@ services:
- CERTBOT_EMAIL=${CERTBOT_EMAIL}
- CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
- CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
entrypoint: ['/docker-entrypoint.sh']
command: ['tail', '-f', '/dev/null']
entrypoint: [ '/docker-entrypoint.sh' ]
command: [ 'tail', '-f', '/dev/null' ]
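Here `tail -f /dev/null` simply keeps the certbot container alive so certificates can be issued or renewed on demand — e.g., assuming the stock certbot CLI is on PATH in the image:

    docker compose exec certbot certbot renew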
# The nginx reverse proxy.
# Used for reverse proxying the API service and Web service.
@@ -219,12 +215,7 @@ services:
- ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
- ./volumes/certbot/conf:/etc/letsencrypt
- ./volumes/certbot/www:/var/www/html
entrypoint:
[
'sh',
'-c',
"cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
]
entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
environment:
NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
@@ -316,7 +307,7 @@ services:
working_dir: /opt/couchbase
stdin_open: true
tty: true
entrypoint: [""]
entrypoint: [ "" ]
command: sh -c "/opt/couchbase/init/init-cbserver.sh"
volumes:
- ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
@@ -345,7 +336,7 @@ services:
volumes:
- ./volumes/pgvector/data:/var/lib/postgresql/data
healthcheck:
test: ['CMD', 'pg_isready']
test: [ 'CMD', 'pg_isready' ]
interval: 1s
timeout: 3s
retries: 30
@@ -367,7 +358,7 @@ services:
volumes:
- ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
healthcheck:
test: ['CMD', 'pg_isready']
test: [ 'CMD', 'pg_isready' ]
interval: 1s
timeout: 3s
retries: 30
@@ -432,7 +423,7 @@ services:
- ./volumes/milvus/etcd:/etcd
command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
healthcheck:
test: ['CMD', 'etcdctl', 'endpoint', 'health']
test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ]
interval: 30s
timeout: 20s
retries: 3
@@ -451,7 +442,7 @@ services:
- ./volumes/milvus/minio:/minio_data
command: minio server /minio_data --console-address ":9001"
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live']
test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ]
interval: 30s
timeout: 20s
retries: 3
@@ -463,7 +454,7 @@ services:
image: milvusdb/milvus:v2.3.1
profiles:
- milvus
command: ['milvus', 'run', 'standalone']
command: [ 'milvus', 'run', 'standalone' ]
environment:
ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
@@ -471,7 +462,7 @@ services:
volumes:
- ./volumes/milvus/milvus:/var/lib/milvus
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:9091/healthz']
test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ]
interval: 30s
start_period: 90s
timeout: 20s
@@ -559,7 +550,7 @@ services:
ports:
- ${ELASTICSEARCH_PORT:-9200}:9200
healthcheck:
test: ['CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty']
test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ]
interval: 30s
timeout: 10s
retries: 50
@@ -587,7 +578,7 @@ services:
ports:
- ${KIBANA_PORT:-5601}:5601
healthcheck:
test: ['CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1']
test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ]
interval: 30s
timeout: 10s
retries: 3

View File

@@ -27,12 +27,14 @@ x-shared-env: &shared-api-worker-env
MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true}
FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300}
ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60}
REFRESH_TOKEN_EXPIRE_DAYS: ${REFRESH_TOKEN_EXPIRE_DAYS:-30}
APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0}
APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-1200}
DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0}
DIFY_PORT: ${DIFY_PORT:-5001}
SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-}
SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS:-}
SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-1}
SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS:-gevent}
SERVER_WORKER_CONNECTIONS: ${SERVER_WORKER_CONNECTIONS:-10}
CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS:-}
GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT:-360}
CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT:-}
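All of these use compose's shell-style default expansion: `${VAR:-default}` takes the value from .env when it is set and non-empty, and falls back otherwise. The same syntax behaves identically in any POSIX shell:

    unset SERVER_WORKER_AMOUNT
    echo "${SERVER_WORKER_AMOUNT:-1}"   # prints 1
    SERVER_WORKER_AMOUNT=4
    echo "${SERVER_WORKER_AMOUNT:-1}"   # prints 4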
@@ -136,6 +138,7 @@ x-shared-env: &shared-api-worker-env
MILVUS_TOKEN: ${MILVUS_TOKEN:-}
MILVUS_USER: ${MILVUS_USER:-root}
MILVUS_PASSWORD: ${MILVUS_PASSWORD:-Milvus}
MILVUS_ENABLE_HYBRID_SEARCH: ${MILVUS_ENABLE_HYBRID_SEARCH:-False}
MYSCALE_HOST: ${MYSCALE_HOST:-myscale}
MYSCALE_PORT: ${MYSCALE_PORT:-8123}
MYSCALE_USER: ${MYSCALE_USER:-default}
@@ -401,6 +404,7 @@ x-shared-env: &shared-api-worker-env
ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}}
MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true}
MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace-plugin.dify.dev}
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10}
services:
# API service
@@ -478,6 +482,7 @@ services:
CSP_WHITELIST: ${CSP_WHITELIST:-}
MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace-plugin.dify.dev}
MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace-plugin.dify.dev}
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
# The postgres database.
db:
@@ -497,7 +502,7 @@ services:
volumes:
- ./volumes/db/data:/var/lib/postgresql/data
healthcheck:
test: ['CMD', 'pg_isready']
test: [ 'CMD', 'pg_isready' ]
interval: 1s
timeout: 3s
retries: 30
@@ -516,7 +521,7 @@ services:
# Set the Redis password when starting the Redis server.
command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
healthcheck:
test: ['CMD', 'redis-cli', 'ping']
test: [ 'CMD', 'redis-cli', 'ping' ]
# The DifySandbox
sandbox:
@@ -536,7 +541,7 @@ services:
volumes:
- ./volumes/sandbox/dependencies:/dependencies
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:8194/health']
test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ]
networks:
- ssrf_proxy_network
@@ -573,12 +578,7 @@ services:
volumes:
- ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
- ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
entrypoint:
[
'sh',
'-c',
"cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
]
entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
environment:
# Please adjust the Squid environment variables below to fit your network environment.
HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
@@ -607,8 +607,8 @@ services:
- CERTBOT_EMAIL=${CERTBOT_EMAIL}
- CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
- CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
entrypoint: ['/docker-entrypoint.sh']
command: ['tail', '-f', '/dev/null']
entrypoint: [ '/docker-entrypoint.sh' ]
command: [ 'tail', '-f', '/dev/null' ]
# The nginx reverse proxy.
# Used for reverse proxying the API service and Web service.
@@ -625,12 +625,7 @@ services:
- ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
- ./volumes/certbot/conf:/etc/letsencrypt
- ./volumes/certbot/www:/var/www/html
entrypoint:
[
'sh',
'-c',
"cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
]
entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
environment:
NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
@@ -722,7 +717,7 @@ services:
working_dir: /opt/couchbase
stdin_open: true
tty: true
entrypoint: [""]
entrypoint: [ "" ]
command: sh -c "/opt/couchbase/init/init-cbserver.sh"
volumes:
- ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
@@ -751,7 +746,7 @@ services:
volumes:
- ./volumes/pgvector/data:/var/lib/postgresql/data
healthcheck:
test: ['CMD', 'pg_isready']
test: [ 'CMD', 'pg_isready' ]
interval: 1s
timeout: 3s
retries: 30
@@ -773,7 +768,7 @@ services:
volumes:
- ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
healthcheck:
test: ['CMD', 'pg_isready']
test: [ 'CMD', 'pg_isready' ]
interval: 1s
timeout: 3s
retries: 30
@@ -838,7 +833,7 @@ services:
- ./volumes/milvus/etcd:/etcd
command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
healthcheck:
test: ['CMD', 'etcdctl', 'endpoint', 'health']
test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ]
interval: 30s
timeout: 20s
retries: 3
@@ -857,7 +852,7 @@ services:
- ./volumes/milvus/minio:/minio_data
command: minio server /minio_data --console-address ":9001"
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live']
test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ]
interval: 30s
timeout: 20s
retries: 3
@@ -866,10 +861,10 @@ services:
milvus-standalone:
container_name: milvus-standalone
image: milvusdb/milvus:v2.3.1
image: milvusdb/milvus:v2.5.0-beta
profiles:
- milvus
command: ['milvus', 'run', 'standalone']
command: [ 'milvus', 'run', 'standalone' ]
environment:
ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
@@ -877,7 +872,7 @@ services:
volumes:
- ./volumes/milvus/milvus:/var/lib/milvus
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:9091/healthz']
test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ]
interval: 30s
start_period: 90s
timeout: 20s
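Because the tag moves from v2.3.1 to v2.5.0-beta (required for the hybrid-search flag introduced above), existing deployments should pull the new image before restarting the profile:

    docker compose --profile milvus pull
    docker compose --profile milvus up -d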
@@ -950,22 +945,30 @@ services:
container_name: elasticsearch
profiles:
- elasticsearch
- elasticsearch-ja
restart: always
volumes:
- ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh
- dify_es01_data:/usr/share/elasticsearch/data
environment:
ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
VECTOR_STORE: ${VECTOR_STORE:-}
cluster.name: dify-es-cluster
node.name: dify-es0
discovery.type: single-node
xpack.license.self_generated.type: trial
xpack.license.self_generated.type: basic
xpack.security.enabled: 'true'
xpack.security.enrollment.enabled: 'false'
xpack.security.http.ssl.enabled: 'false'
ports:
- ${ELASTICSEARCH_PORT:-9200}:9200
deploy:
resources:
limits:
memory: 2g
entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ]
healthcheck:
test: ['CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty']
test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ]
interval: 30s
timeout: 10s
retries: 50
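To exercise the new profile end to end, bring Elasticsearch up under elasticsearch-ja with VECTOR_STORE set to match, then list the plugins the new entrypoint script installs (_cat/plugins is a standard Elasticsearch API):

    VECTOR_STORE=elasticsearch-ja docker compose --profile elasticsearch-ja up -d elasticsearch
    curl -s -u elastic:elastic http://localhost:9200/_cat/plugins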
@@ -993,7 +996,7 @@ services:
ports:
- ${KIBANA_PORT:-5601}:5601
healthcheck:
test: ['CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1']
test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ]
interval: 30s
timeout: 10s
retries: 3

View File

@@ -0,0 +1,25 @@
#!/bin/bash
set -e

if [ "${VECTOR_STORE}" = "elasticsearch-ja" ]; then
    # Check if the ICU tokenizer plugin is installed
    if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-icu; then
        printf '%s\n' "Installing the ICU tokenizer plugin"
        if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-icu; then
            printf '%s\n' "Failed to install the ICU tokenizer plugin"
            exit 1
        fi
    fi

    # Check if the Japanese language analyzer plugin is installed
    if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-kuromoji; then
        printf '%s\n' "Installing the Japanese language analyzer plugin"
        if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-kuromoji; then
            printf '%s\n' "Failed to install the Japanese language analyzer plugin"
            exit 1
        fi
    fi
fi

# Run the original entrypoint script
exec /bin/tini -- /usr/local/bin/docker-entrypoint.sh
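Once the container reports healthy, a quick smoke test of the newly installed kuromoji analyzer via the standard _analyze API (the example text is illustrative):

    curl -s -u elastic:elastic -H 'Content-Type: application/json' \
        -X POST http://localhost:9200/_analyze \
        -d '{"analyzer": "kuromoji", "text": "関西国際空港"}'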