[Bug: 900] Create a version of MultimodalQnA example with Zilliz/Milvus as Vector DB (#1639)
Signed-off-by: Shifani Rajabose <srajabose@habana.ai>
Signed-off-by: Pallavi Jaini <pallavi.jaini@intel.com>
@@ -235,6 +235,17 @@ cd GenAIExamples/MultimodalQnA/docker_compose/intel/cpu/xeon/
docker compose -f compose.yaml up -d
```

> Alternatively, you can run docker compose with `compose_milvus.yaml` to use the Milvus vector database:

```bash
export MILVUS_HOST=${host_ip}
export MILVUS_PORT=19530
export MILVUS_RETRIEVER_PORT=7000
export COLLECTION_NAME=mm_rag_milvus
cd GenAIExamples/MultimodalQnA/docker_compose/intel/cpu/xeon/
docker compose -f compose_milvus.yaml up -d
```

### Validate Microservices

1. embedding-multimodal-bridgetower
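A quick smoke test for this first service, sketched against the healthcheck that `compose_milvus.yaml` (below) defines for it; the `/v1/encode` route and payload shape are assumptions based on other MultimodalQnA validation steps, not confirmed by this diff:

```bash
# Liveness probe: the same route the compose healthcheck polls.
curl -f "http://${host_ip}:${EMM_BRIDGETOWER_PORT}/v1/health_check"

# Hypothetical embedding request; treat the route and body as assumptions.
curl -X POST "http://${host_ip}:${EMM_BRIDGETOWER_PORT}/v1/encode" \
     -H "Content-Type: application/json" \
     -d '{"text": "This is a sample text"}'
```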
MultimodalQnA/docker_compose/intel/cpu/xeon/compose_milvus.yaml (new file, 237 lines)
@@ -0,0 +1,237 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

services:
  whisper-service:
    image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
    container_name: whisper-service
    ports:
      - "${WHISPER_PORT}:7066"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
    restart: unless-stopped

  milvus-etcd:
    container_name: milvus-etcd
    image: quay.io/coreos/etcd:v3.5.5
    environment:
      - ETCD_AUTO_COMPACTION_MODE=revision
      - ETCD_AUTO_COMPACTION_RETENTION=1000
      - ETCD_QUOTA_BACKEND_BYTES=4294967296
      - ETCD_SNAPSHOT_COUNT=50000
    volumes:
      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/etcd:/etcd
    command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
    healthcheck:
      test: ["CMD", "etcdctl", "endpoint", "health"]
      interval: 30s
      timeout: 20s
      retries: 3

  milvus-minio:
    container_name: milvus-minio
    image: minio/minio:RELEASE.2023-03-20T20-16-18Z
    environment:
      MINIO_ACCESS_KEY: minioadmin
      MINIO_SECRET_KEY: minioadmin
    ports:
      - "${MINIO_PORT1:-5044}:9001"
      - "${MINIO_PORT2:-5043}:9000"
    volumes:
      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/minio:/minio_data
    command: minio server /minio_data --console-address ":9001"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3

  milvus-standalone:
    container_name: milvus-standalone
    image: milvusdb/milvus:v2.4.6
    command: ["milvus", "run", "standalone"]
    security_opt:
      - seccomp:unconfined
    environment:
      ETCD_ENDPOINTS: milvus-etcd:2379
      MINIO_ADDRESS: milvus-minio:9000
    volumes:
      - ${DOCKER_VOLUME_DIRECTORY:-.}/config/milvus.yaml:/milvus/configs/milvus.yaml
      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
      interval: 30s
      start_period: 90s
      timeout: 20s
      retries: 3
    ports:
      - "19530:19530"
      - "${MILVUS_STANDALONE_PORT:-9091}:9091"
    depends_on:
      - "milvus-etcd"
      - "milvus-minio"

  dataprep-multimodal-milvus:
    image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}
    container_name: dataprep-multimodal-milvus
    depends_on:
      - milvus-standalone
      - lvm-llava
    ports:
      - "${DATAPREP_MMR_PORT}:5000"
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      MULTIMODAL_DATAPREP: true
      DATAPREP_COMPONENT_NAME: "OPEA_DATAPREP_MULTIMODALMILVUS"
      MILVUS_HOST: ${MILVUS_HOST}
      COLLECTION_NAME: ${COLLECTION_NAME:-LangChainCollection}
      LVM_ENDPOINT: "http://${LVM_SERVICE_HOST_IP}:${LVM_PORT}/v1/lvm"
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped

  embedding-multimodal-bridgetower:
    image: ${REGISTRY:-opea}/embedding-multimodal-bridgetower:${TAG:-latest}
    container_name: embedding-multimodal-bridgetower
    ports:
      - ${EMM_BRIDGETOWER_PORT}:${EMM_BRIDGETOWER_PORT}
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      EMM_BRIDGETOWER_PORT: ${EMM_BRIDGETOWER_PORT}
      PORT: ${EMM_BRIDGETOWER_PORT}
    healthcheck:
      test: ["CMD-SHELL", "http_proxy='' curl -f http://localhost:${EMM_BRIDGETOWER_PORT}/v1/health_check"]
      interval: 10s
      timeout: 6s
      retries: 18
      start_period: 30s
    entrypoint: ["python", "bridgetower_server.py", "--device", "cpu", "--model_name_or_path", $EMBEDDING_MODEL_ID]
    restart: unless-stopped

  embedding:
    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
    container_name: embedding
    depends_on:
      embedding-multimodal-bridgetower:
        condition: service_healthy
    ports:
      - ${MM_EMBEDDING_PORT_MICROSERVICE}:${MM_EMBEDDING_PORT_MICROSERVICE}
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      MMEI_EMBEDDING_ENDPOINT: ${MMEI_EMBEDDING_ENDPOINT}
      MM_EMBEDDING_PORT_MICROSERVICE: ${MM_EMBEDDING_PORT_MICROSERVICE}
      MULTIMODAL_EMBEDDING: true
    restart: unless-stopped

  retriever-milvus:
    image: ${REGISTRY:-opea}/retriever:${TAG:-latest}
    container_name: retriever-milvus
    depends_on:
      - milvus-standalone
    ports:
      - "${MILVUS_RETRIEVER_PORT}:${MILVUS_RETRIEVER_PORT}"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      MILVUS_HOST: ${host_ip}
      BRIDGE_TOWER_EMBEDDING: ${BRIDGE_TOWER_EMBEDDING}
      LOGFLAG: ${LOGFLAG}
      RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_MILVUS"
      COLLECTION_NAME: ${COLLECTION_NAME:-LangChainCollection}
    restart: unless-stopped

  lvm-llava:
    image: ${REGISTRY:-opea}/lvm-llava:${TAG:-latest}
    container_name: lvm-llava
    ports:
      - "${LLAVA_SERVER_PORT}:${LLAVA_SERVER_PORT}"
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      LLAVA_SERVER_PORT: ${LLAVA_SERVER_PORT}
      LVM_PORT: ${LVM_PORT}
    entrypoint: ["python", "llava_server.py", "--device", "cpu", "--model_name_or_path", $LVM_MODEL_ID]
    restart: unless-stopped

  lvm:
    image: ${REGISTRY:-opea}/lvm:${TAG:-latest}
    container_name: lvm
    depends_on:
      - lvm-llava
    ports:
      - "${LVM_PORT}:${LVM_PORT}"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      LVM_COMPONENT_NAME: "OPEA_LLAVA_LVM"
      LVM_ENDPOINT: ${LVM_ENDPOINT}
      LLAVA_SERVER_PORT: ${LLAVA_SERVER_PORT}
      LVM_PORT: ${LVM_PORT}
      MAX_IMAGES: ${MAX_IMAGES:-1}
    restart: unless-stopped

  multimodalqna:
    image: ${REGISTRY:-opea}/multimodalqna:${TAG:-latest}
    container_name: multimodalqna-backend-server
    depends_on:
      - milvus-standalone
      - dataprep-multimodal-milvus
      - embedding
      - retriever-milvus
      - lvm
    ports:
      - "${MEGA_SERVICE_PORT}:${MEGA_SERVICE_PORT}"
    environment:
      no_proxy: ${no_proxy}
      https_proxy: ${https_proxy}
      http_proxy: ${http_proxy}
      MEGA_SERVICE_HOST_IP: ${MEGA_SERVICE_HOST_IP}
      MEGA_SERVICE_PORT: ${MEGA_SERVICE_PORT}
      MM_EMBEDDING_SERVICE_HOST_IP: ${MM_EMBEDDING_SERVICE_HOST_IP}
      MM_EMBEDDING_PORT_MICROSERVICE: ${MM_EMBEDDING_PORT_MICROSERVICE}
      MM_RETRIEVER_SERVICE_HOST_IP: ${MM_RETRIEVER_SERVICE_HOST_IP}
      LVM_SERVICE_HOST_IP: ${LVM_SERVICE_HOST_IP}
      LVM_MODEL_ID: ${LVM_MODEL_ID}
      WHISPER_PORT: ${WHISPER_PORT}
      WHISPER_SERVER_ENDPOINT: ${WHISPER_SERVER_ENDPOINT}
    ipc: host
    restart: always

  multimodalqna-ui:
    image: ${REGISTRY:-opea}/multimodalqna-ui:${TAG:-latest}
    container_name: multimodalqna-gradio-ui-server
    depends_on:
      - multimodalqna
    ports:
      - "${UI_PORT}:${UI_PORT}"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - BACKEND_SERVICE_ENDPOINT=${BACKEND_SERVICE_ENDPOINT}
      - DATAPREP_INGEST_SERVICE_ENDPOINT=${DATAPREP_INGEST_SERVICE_ENDPOINT}
      - DATAPREP_GEN_TRANSCRIPT_SERVICE_ENDPOINT=${DATAPREP_GEN_TRANSCRIPT_SERVICE_ENDPOINT}
      - DATAPREP_GEN_CAPTION_SERVICE_ENDPOINT=${DATAPREP_GEN_CAPTION_SERVICE_ENDPOINT}
      - MEGA_SERVICE_PORT=${MEGA_SERVICE_PORT}
      - UI_PORT=${UI_PORT}
      - DATAPREP_MMR_PORT=${DATAPREP_MMR_PORT}
    ipc: host
    restart: always

networks:
  default:
    driver: bridge
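Once the stack is up, its health can be spot-checked with endpoints this file already declares; a minimal sketch, assuming the default `9091` standalone port mapping above:

```bash
# Show the state of every service defined in compose_milvus.yaml.
docker compose -f compose_milvus.yaml ps

# Probe the Milvus standalone health endpoint used by its own healthcheck.
curl -f "http://${host_ip}:9091/healthz"
```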
MultimodalQnA/docker_compose/intel/cpu/xeon/config/milvus.yaml (new file, 811 lines)
@@ -0,0 +1,811 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Related configuration of etcd, used to store Milvus metadata & service discovery.
etcd:
  endpoints: localhost:2379
  rootPath: by-dev # The root path where data is stored in etcd
  metaSubPath: meta # metaRootPath = rootPath + '/' + metaSubPath
  kvSubPath: kv # kvRootPath = rootPath + '/' + kvSubPath
  log:
    level: info # Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
    # path is one of:
    #  - "default" as os.Stderr,
    #  - "stderr" as os.Stderr,
    #  - "stdout" as os.Stdout,
    #  - file path to append server logs to.
    # please adjust in embedded Milvus: /tmp/milvus/logs/etcd.log
    path: stdout
  ssl:
    enabled: false # Whether to support ETCD secure connection mode
    tlsCert: /path/to/etcd-client.pem # path to your cert file
    tlsKey: /path/to/etcd-client-key.pem # path to your key file
    tlsCACert: /path/to/ca.pem # path to your CACert file
    # TLS min version
    # Optional values: 1.0, 1.1, 1.2, 1.3.
    # We recommend using version 1.2 and above.
    tlsMinVersion: 1.3
  requestTimeout: 10000 # Etcd operation timeout in milliseconds
  use:
    embed: false # Whether to enable embedded Etcd (an in-process EtcdServer).
  data:
    dir: default.etcd # Embedded Etcd only. please adjust in embedded Milvus: /tmp/milvus/etcdData/
  auth:
    enabled: false # Whether to enable authentication
    userName: # username for etcd authentication
    password: # password for etcd authentication

metastore:
  type: etcd # Default value: etcd, Valid values: [etcd, tikv]

# Related configuration of tikv, used to store Milvus metadata.
# Notice that when TiKV is enabled for metastore, you still need to have etcd for service discovery.
# TiKV is a good option when the metadata size requires better horizontal scalability.
tikv:
  endpoints: 127.0.0.1:2389 # Note that the default pd port of tikv is 2379, which conflicts with etcd.
  rootPath: by-dev # The root path where data is stored in tikv
  metaSubPath: meta # metaRootPath = rootPath + '/' + metaSubPath
  kvSubPath: kv # kvRootPath = rootPath + '/' + kvSubPath
  requestTimeout: 10000 # ms, tikv request timeout
  snapshotScanSize: 256 # batch size of tikv snapshot scan
  ssl:
    enabled: false # Whether to support TiKV secure connection mode
    tlsCert: # path to your cert file
    tlsKey: # path to your key file
    tlsCACert: # path to your CACert file

localStorage:
  path: /var/lib/milvus/data/ # please adjust in embedded Milvus: /tmp/milvus/data/

# Related configuration of MinIO/S3/GCS or any other service that supports the S3 API, which is responsible for data persistence for Milvus.
# We refer to the storage service as MinIO/S3 in the following description for simplicity.
minio:
  address: localhost # Address of MinIO/S3
  port: 9000 # Port of MinIO/S3
  accessKeyID: minioadmin # accessKeyID of MinIO/S3
  secretAccessKey: minioadmin # MinIO/S3 encryption string
  useSSL: false # Access to MinIO/S3 with SSL
  ssl:
    tlsCACert: /path/to/public.crt # path to your CACert file
  bucketName: a-bucket # Bucket name in MinIO/S3
  rootPath: files # The root path where the message is stored in MinIO/S3
  # Whether to use an IAM role to access S3/GCS instead of access/secret keys
  # For more information, refer to
  # aws: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
  # gcp: https://cloud.google.com/storage/docs/access-control/iam
  # aliyun (ack): https://www.alibabacloud.com/help/en/container-service-for-kubernetes/latest/use-rrsa-to-enforce-access-control
  # aliyun (ecs): https://www.alibabacloud.com/help/en/elastic-compute-service/latest/attach-an-instance-ram-role
  useIAM: false
  # Cloud Provider of S3. Supports: "aws", "gcp", "aliyun".
  # You can use "aws" for any other cloud provider that supports the S3 API with signature v4, e.g. minio
  # You can use "gcp" for any other cloud provider that supports the S3 API with signature v2
  # You can use "aliyun" for any other cloud provider that uses virtual-host-style buckets
  # When useIAM is enabled, only "aws", "gcp", and "aliyun" are supported for now
  cloudProvider: aws
  # Custom endpoint for fetching IAM role credentials, used when useIAM is true & cloudProvider is "aws".
  # Leave it empty if you want to use the AWS default endpoint
  iamEndpoint:
  logLevel: fatal # Log level for aws sdk log. Supported levels: off, fatal, error, warn, info, debug, trace
  region: # Specify minio storage system location region
  useVirtualHost: false # Whether to use virtual host mode for the bucket
  requestTimeoutMs: 10000 # minio timeout for request time in milliseconds
  # The maximum number of objects requested per batch in the minio ListObjects rpc,
  # 0 means using the oss client default; decrease this configuration if ListObjects times out
  listObjectsMaxKeys: 0

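# Note (not part of the upstream defaults): when this file is mounted by
# compose_milvus.yaml, the etcd and MinIO addresses above are expected to be
# overridden at runtime through the ETCD_ENDPOINTS and MINIO_ADDRESS container
# environment variables set there, so the localhost defaults act only as fallbacks.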

# Milvus supports four MQs: rocksmq (based on RocksDB), natsmq (embedded nats-server), Pulsar, and Kafka.
# You can change your mq by setting the mq.type field.
# When mq.type is left as default, the following priority decides which MQ is used if multiple are configured in this file:
# 1. standalone(local) mode: rocksmq(default) > natsmq > Pulsar > Kafka
# 2. cluster mode: Pulsar(default) > Kafka (rocksmq and natsmq are unsupported in cluster mode)
mq:
  # Default value: "default"
  # Valid values: [default, pulsar, kafka, rocksmq, natsmq]
  type: default
  enablePursuitMode: true # Default value: "true"
  pursuitLag: 10 # time tick lag threshold to enter pursuit mode, in seconds
  pursuitBufferSize: 8388608 # pursuit mode buffer size in bytes
  mqBufSize: 16 # MQ client consumer buffer length
  dispatcher:
    mergeCheckInterval: 1 # the interval time (in seconds) for the dispatcher to check whether to merge
    targetBufSize: 16 # the length of the channel buffer for the target
    maxTolerantLag: 3 # Default value: "3", the timeout (in seconds) that the target sends msgPack

# Related configuration of pulsar, used to manage Milvus logs of recent mutation operations, output streaming log, and provide log publish-subscribe services.
pulsar:
  address: localhost # Address of pulsar
  port: 6650 # Port of Pulsar
  webport: 80 # Web port of pulsar; if you connect directly without proxy, should use 8080
  maxMessageSize: 5242880 # 5 * 1024 * 1024 Bytes, Maximum size of each message in pulsar.
  tenant: public
  namespace: default
  requestTimeout: 60 # pulsar client global request timeout in seconds
  enableClientMetrics: false # Whether to register pulsar client metrics into the milvus metrics path.

# If you want to enable kafka, you need to comment out the pulsar configs
# kafka:
#   brokerList:
#   saslUsername:
#   saslPassword:
#   saslMechanisms:
#   securityProtocol:
#   ssl:
#     enabled: false # whether to enable ssl mode
#     tlsCert: # path to client's public key (PEM) used for authentication
#     tlsKey: # path to client's private key (PEM) used for authentication
#     tlsCaCert: # file or directory path to CA certificate(s) for verifying the broker's key
#     tlsKeyPassword: # private key passphrase for use with ssl.key.location and set_ssl_cert(), if any
#   readTimeout: 10

rocksmq:
  # The path where the message is stored in rocksmq
  # please adjust in embedded Milvus: /tmp/milvus/rdb_data
  path: /var/lib/milvus/rdb_data
  lrucacheratio: 0.06 # rocksdb cache memory ratio
  rocksmqPageSize: 67108864 # 64 MB, 64 * 1024 * 1024 bytes, The size of each page of messages in rocksmq
  retentionTimeInMinutes: 4320 # 3 days, 3 * 24 * 60 minutes, The retention time of the message in rocksmq.
  retentionSizeInMB: 8192 # 8 GB, 8 * 1024 MB, The retention size of the message in rocksmq.
  compactionInterval: 86400 # 1 day, trigger rocksdb compaction every day to remove deleted data
  compressionTypes: 0,0,7,7,7 # compaction compression type; only 0 and 7 are supported. 0 means no compression, 7 uses zstd. The number of types equals the number of rocksdb levels.

# natsmq configuration.
# more detail: https://docs.nats.io/running-a-nats-service/configuration
natsmq:
  server:
    port: 4222 # Port for nats server listening
    storeDir: /var/lib/milvus/nats # Directory to use for JetStream storage of nats
    maxFileStore: 17179869184 # Maximum size of the 'file' storage
    maxPayload: 8388608 # Maximum number of bytes in a message payload
    maxPending: 67108864 # Maximum number of bytes buffered for a connection. Applies to client connections
    initializeTimeout: 4000 # timeout to wait for natsmq initialization to finish
    monitor:
      trace: false # If true, enable protocol trace log messages
      debug: false # If true, enable debug log messages
      logTime: true # If set to false, log without timestamps.
      logFile: /tmp/milvus/logs/nats.log # Log file path, relative to .. of the milvus binary if a relative path is used
      logSizeLimit: 536870912 # Size in bytes after which the log file rolls over to a new one
    retention:
      maxAge: 4320 # Maximum age of any message in the P-channel
      maxBytes: # How many bytes the single P-channel may contain. Removes oldest messages if the P-channel exceeds this size
      maxMsgs: # How many messages the single P-channel may contain. Removes oldest messages if the P-channel exceeds this limit

# Related configuration of rootCoord, used to handle data definition language (DDL) and data control language (DCL) requests
rootCoord:
  dmlChannelNum: 16 # The number of dml channels created at system startup
  maxPartitionNum: 1024 # Maximum number of partitions in a collection
  minSegmentSizeToEnableIndex: 1024 # It's a threshold. When the segment size is less than this value, the segment will not be indexed
  enableActiveStandby: false
  maxDatabaseNum: 64 # Maximum number of databases
  maxGeneralCapacity: 65536 # upper limit for the sum of the products of partitionNumber and shardNumber
  gracefulStopTimeout: 5 # seconds. force stop node without graceful stop
  ip: # if not specified, use the first unicastable address
  port: 53100
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 268435456
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 536870912

# Related configuration of proxy, used to validate client requests and reduce the returned results.
proxy:
  timeTickInterval: 200 # ms, the interval at which the proxy synchronizes the time tick
  healthCheckTimeout: 3000 # ms, the timeout for the component health check
  msgStream:
    timeTick:
      bufSize: 512
  maxNameLength: 255 # Maximum length of the name of a collection or alias
  # Maximum number of fields in a collection.
  # As of today (2.2.0 and after) it is strongly DISCOURAGED to set maxFieldNum >= 64.
  # So adjust at your risk!
  maxFieldNum: 64
  maxVectorFieldNum: 4 # Maximum number of vector fields in a collection.
  maxShardNum: 16 # Maximum number of shards in a collection
  maxDimension: 32768 # Maximum dimension of a vector
  # Whether to produce gin logs.
  # please adjust in embedded Milvus: false
  ginLogging: true
  ginLogSkipPaths: / # skip url path for gin log
  maxTaskNum: 1024 # max task number of the proxy task queue
  mustUsePartitionKey: false # switch for whether the proxy must use the partition key for the collection
  accessLog:
    enable: false # whether to enable the access log
    minioEnable: false # whether to upload sealed access log files to minio
    localPath: /tmp/milvus_access
    filename: # Log filename, leave empty to use stdout.
    maxSize: 64 # Max size for a single file, in MB.
    cacheSize: 10240 # Size of the log memory cache, in B
    rotatedTime: 0 # Max time for a single access log file, in seconds
    remotePath: access_log/ # File path in minIO
    remoteMaxTime: 0 # Max time for a log file in minIO, in hours
    formatters:
      base:
        format: "[$time_now] [ACCESS] <$user_name: $user_addr> $method_name [status: $method_status] [code: $error_code] [sdk: $sdk_version] [msg: $error_msg] [traceID: $trace_id] [timeCost: $time_cost]"
      query:
        format: "[$time_now] [ACCESS] <$user_name: $user_addr> $method_name [status: $method_status] [code: $error_code] [sdk: $sdk_version] [msg: $error_msg] [traceID: $trace_id] [timeCost: $time_cost] [database: $database_name] [collection: $collection_name] [partitions: $partition_name] [expr: $method_expr]"
        methods: "Query,Search,Delete"
  connectionCheckIntervalSeconds: 120 # the interval time (in seconds) for the connection manager to scan inactive client info
  connectionClientInfoTTLSeconds: 86400 # inactive client info TTL duration, in seconds
  maxConnectionNum: 10000 # the max number of client infos that the proxy should manage, to avoid too many client infos
  gracefulStopTimeout: 30 # seconds. force stop node without graceful stop
  slowQuerySpanInSeconds: 5 # a query whose execution time exceeds `slowQuerySpanInSeconds` is considered slow, in seconds.
  http:
    enabled: true # Whether to enable the http server
    debug_mode: false # Whether to enable http server debug mode
    port: # high-level restful api
    acceptTypeAllowInt64: true # high-level restful api, whether the http client can deal with int64
    enablePprof: true # Whether to enable pprof middleware on the metrics port
  ip: # if not specified, use the first unicastable address
  port: 19530
  internalPort: 19529
  grpc:
    serverMaxSendSize: 268435456
    serverMaxRecvSize: 67108864
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 67108864

# Related configuration of queryCoord, used to manage topology and load balancing for the query nodes, and handoff from growing segments to sealed segments.
queryCoord:
  taskMergeCap: 1
  taskExecutionCap: 256
  autoHandoff: true # Enable auto handoff
  autoBalance: true # Enable auto balance
  autoBalanceChannel: true # Enable auto balance channel
  balancer: ScoreBasedBalancer # auto balancer used for segments on queryNodes
  globalRowCountFactor: 0.1 # the weight used when balancing segments among queryNodes
  scoreUnbalanceTolerationFactor: 0.05 # the least value of unbalanced extent between from and to nodes when doing balance
  reverseUnBalanceTolerationFactor: 1.3 # the largest value of unbalanced extent between from and to nodes after doing balance
  overloadedMemoryThresholdPercentage: 90 # The memory usage threshold percentage at which a node counts as overloaded
  balanceIntervalSeconds: 60
  memoryUsageMaxDifferencePercentage: 30
  rowCountFactor: 0.4 # the row count weight used when balancing segments among queryNodes
  segmentCountFactor: 0.4 # the segment count weight used when balancing segments among queryNodes
  globalSegmentCountFactor: 0.1 # the segment count weight used when balancing segments among queryNodes
  segmentCountMaxSteps: 50 # segment count based plan generator max steps
  rowCountMaxSteps: 50 # row count based plan generator max steps
  randomMaxSteps: 10 # random plan generator max steps
  growingRowCountWeight: 4 # the memory weight of growing segment row count
  balanceCostThreshold: 0.001 # the threshold of balance cost; if the difference in the cluster's cost after executing the balance plan is less than this value, the plan will not be executed
  checkSegmentInterval: 1000
  checkChannelInterval: 1000
  checkBalanceInterval: 10000
  checkIndexInterval: 10000
  channelTaskTimeout: 60000 # 1 minute
  segmentTaskTimeout: 120000 # 2 minutes
  distPullInterval: 500
  collectionObserverInterval: 200
  checkExecutedFlagInterval: 100
  heartbeatAvailableInterval: 10000 # 10s, Only QueryNodes which fetched heartbeats within the duration are available
  loadTimeoutSeconds: 600
  distRequestTimeout: 5000 # the request timeout for querycoord fetching data distribution from querynodes, in milliseconds
  heatbeatWarningLag: 5000 # the lag value at which querycoord reports a warning when the last heartbeat is too old, in milliseconds
  checkHandoffInterval: 5000
  enableActiveStandby: false
  checkInterval: 1000
  checkHealthInterval: 3000 # 3s, the interval at which query coord tries to check the health of query nodes
  checkHealthRPCTimeout: 2000 # 2000ms, the timeout of the check-health rpc to a query node
  brokerTimeout: 5000 # 5000ms, querycoord broker rpc timeout
  collectionRecoverTimes: 3 # if collection recover times reach the limit during loading state, release it
  observerTaskParallel: 16 # the parallel observer dispatcher task number
  checkAutoBalanceConfigInterval: 10 # the interval for checking the auto balance config
  checkNodeSessionInterval: 60 # the interval (in seconds) for checking querynode cluster sessions
  gracefulStopTimeout: 5 # seconds. force stop node without graceful stop
  enableStoppingBalance: true # whether to enable stopping balance
  channelExclusiveNodeFactor: 4 # the minimum number of nodes needed to enable a channel's exclusive mode
  cleanExcludeSegmentInterval: 60 # the time duration for cleaning pipeline-excluded segments (used to filter invalid data), in seconds
  ip: # if not specified, use the first unicastable address
  port: 19531
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 268435456
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 536870912

# Related configuration of queryNode, used to run hybrid search between vector and scalar data.
queryNode:
  stats:
    publishInterval: 1000 # Interval for querynode to report node information (milliseconds)
  segcore:
    knowhereThreadPoolNumRatio: 4 # The number of threads in knowhere's thread pool. If disk is enabled, the pool size will multiply with knowhereThreadPoolNumRatio([1, 32]).
    chunkRows: 128 # The number of vectors in a chunk.
    interimIndex:
      enableIndex: true # Enable segment build with index to accelerate vector search when the segment is in growing or binlog.
      nlist: 128 # temp index nlist; recommended to set to sqrt(chunkRows), must be smaller than chunkRows/8
      nprobe: 16 # nprobe for searching the small index, based on your accuracy requirement; must be smaller than nlist
      memExpansionRate: 1.15 # extra memory needed by building the interim index
      buildParallelRate: 0.5 # the ratio of interim index build parallelism matched to the cpu count
    knowhereScoreConsistency: false # Enable knowhere strong consistency score computation logic
  loadMemoryUsageFactor: 1 # The multiplier factor for calculating the memory usage while loading segments
  enableDisk: false # enable querynode to load disk indexes and search on them
  maxDiskUsagePercentage: 95
  cache:
    enabled: true
    memoryLimit: 2147483648 # 2 GB, 2 * 1024 * 1024 * 1024
    readAheadPolicy: willneed # The read ahead policy of chunk cache, options: `normal, random, sequential, willneed, dontneed`
    # options: async, sync, disable.
    # Specifies the necessity for warming up the chunk cache.
    # 1. If set to "sync" or "async" the original vector data will be synchronously/asynchronously loaded into the
    # chunk cache during the load process. This approach has the potential to substantially reduce query/search latency
    # for a specific duration post-load, albeit accompanied by a concurrent increase in disk usage;
    # 2. If set to "disable" original vector data will only be loaded into the chunk cache during search/query.
    warmup: disable
  mmap:
    mmapEnabled: false # Enable mmap for loading data
  lazyload:
    enabled: false # Enable lazyload for loading data
    waitTimeout: 30000 # max wait timeout duration in milliseconds before starting lazyload search and retrieve
    requestResourceTimeout: 5000 # max timeout in milliseconds for waiting for resources requested for lazy load, 5s by default
    requestResourceRetryInterval: 2000 # retry interval in milliseconds for waiting for resources requested for lazy load, 2s by default
    maxRetryTimes: 1 # max retry times for lazy load, 1 by default
    maxEvictPerRetry: 1 # max evict count for lazy load, 1 by default
  grouping:
    enabled: true
    maxNQ: 1000
    topKMergeRatio: 20
  scheduler:
    receiveChanSize: 10240
    unsolvedQueueSize: 10240
    # maxReadConcurrentRatio is the concurrency ratio of read tasks (search tasks and query tasks).
    # Max read concurrency would be the value of hardware.GetCPUNum * maxReadConcurrentRatio.
    # It defaults to 2.0, which means max read concurrency would be the value of hardware.GetCPUNum * 2.
    # Max read concurrency must be greater than or equal to 1, and less than or equal to hardware.GetCPUNum * 100.
    # (0, 100]
    maxReadConcurrentRatio: 1
    cpuRatio: 10 # ratio used to estimate read task cpu usage.
    maxTimestampLag: 86400
    scheduleReadPolicy:
      # fifo: A FIFO queue supports the schedule.
      # user-task-polling:
      #   The user's tasks will be polled one by one and scheduled.
      #   Scheduling is fair on task granularity.
      #   The policy is based on the username for authentication,
      #   and an empty username is considered the same user.
      #   When there are no multiple users, the policy decays into FIFO.
      name: fifo
      taskQueueExpire: 60 # Controls how long (in seconds) the queue is retained after it becomes empty
      enableCrossUserGrouping: false # Enable cross-user grouping when using the user-task-polling policy. (Disable it if users' tasks cannot be merged with each other)
      maxPendingTaskPerUser: 1024 # Max pending tasks per user in the scheduler
  dataSync:
    flowGraph:
      maxQueueLength: 16 # Maximum length of the task queue in the flowgraph
      maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
  enableSegmentPrune: false # use the partition prune function on the shard delegator
  ip: # if not specified, use the first unicastable address
  port: 21123
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 268435456
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 536870912

indexCoord:
  bindIndexNodeMode:
    enable: false
    address: localhost:22930
    withCred: false
    nodeID: 0
  segment:
    minSegmentNumRowsToEnableIndex: 1024 # It's a threshold. When the segment num rows is less than this value, the segment will not be indexed

indexNode:
  scheduler:
    buildParallel: 1
  enableDisk: true # enable the index node to build disk vector indexes
  maxDiskUsagePercentage: 95
  ip: # if not specified, use the first unicastable address
  port: 21121
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 268435456
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 536870912

dataCoord:
  channel:
    watchTimeoutInterval: 300 # Timeout on watching channels (in seconds). Datanode tickler updates of watch progress reset the timeout timer.
    balanceWithRpc: true # Whether to enable balance with RPC; the default is to use etcd watch
    legacyVersionWithoutRPCWatch: 2.4.1 # Datanodes <= this version are considered legacy nodes, which don't have rpc-based watch(). This is only used during rolling upgrade where legacy nodes won't get new channels
    balanceSilentDuration: 300 # The duration after which the channel manager starts background channel balancing
    balanceInterval: 360 # The interval at which the channel manager checks the dml channel balance status
    checkInterval: 1 # The interval in seconds at which the channel manager advances channel states
    notifyChannelOperationTimeout: 5 # Timeout for notifying channel operations (in seconds).
  segment:
    maxSize: 1024 # Maximum size of a segment in MB
    diskSegmentMaxSize: 2048 # Maximum size of a segment in MB for a collection which has a disk index
    sealProportion: 0.12
    assignmentExpiration: 2000 # The time of the assignment expiration in ms
    allocLatestExpireAttempt: 200 # The time attempting to alloc latest lastExpire from rootCoord after restart
    maxLife: 86400 # The max lifetime of a segment in seconds, 24*60*60
    # If a segment didn't accept dml records in maxIdleTime and the size of the segment is greater than
    # minSizeFromIdleToSealed, Milvus will automatically seal it.
    # The max idle time of a segment in seconds, 10*60.
    maxIdleTime: 600
    minSizeFromIdleToSealed: 16 # The min size in MB of a segment which can go from idle to sealed.
    # The max number of binlog files for one segment; the segment will be sealed if
    # the number of binlog files reaches the max value.
    maxBinlogFileNumber: 32
    smallProportion: 0.5 # The segment is considered a "small segment" when its # of rows is smaller than
    # (smallProportion * segment max # of rows).
    # A compaction will happen on small segments if the segment after compaction will have
    compactableProportion: 0.85
    # over (compactableProportion * segment max # of rows) rows.
    # MUST BE GREATER THAN OR EQUAL TO <smallProportion>!!!
    # During compaction, the size of segment # of rows is able to exceed segment max # of rows by (expansionRate-1) * 100%.
    expansionRate: 1.25
  autoUpgradeSegmentIndex: false # whether to auto-upgrade the segment index to the index engine's version
  enableCompaction: true # Enable data segment compaction
  compaction:
    enableAutoCompaction: true
    indexBasedCompaction: true
    rpcTimeout: 10
    maxParallelTaskNum: 10
    workerMaxParallelTaskNum: 2
    levelzero:
      forceTrigger:
        minSize: 8388608 # The minimum size in bytes to force trigger a LevelZero Compaction, default as 8MB
        maxSize: 67108864 # The maximum size in bytes to force trigger a LevelZero Compaction, default as 64MB
        deltalogMinNum: 10 # The minimum number of deltalog files to force trigger a LevelZero Compaction
        deltalogMaxNum: 30 # The maximum number of deltalog files to force trigger a LevelZero Compaction, default as 30
  enableGarbageCollection: true
  gc:
    interval: 3600 # gc interval in seconds
    missingTolerance: 86400 # file meta missing tolerance duration in seconds, default to 24hr(1d)
    dropTolerance: 10800 # tolerance duration in seconds for files that belong to a dropped entity. 3600
    removeConcurrent: 32 # number of concurrent goroutines to remove dropped s3 objects
    scanInterval: 168 # garbage collection residue scan interval in hours
  enableActiveStandby: false
  brokerTimeout: 5000 # 5000ms, dataCoord broker rpc timeout
  autoBalance: true # Enable auto balance
  checkAutoBalanceConfigInterval: 10 # the interval for checking the auto balance config
  import:
    filesPerPreImportTask: 2 # The maximum number of files allowed per pre-import task.
    taskRetention: 10800 # The retention period in seconds for tasks in the Completed or Failed state.
    maxSizeInMBPerImportTask: 6144 # To prevent the generation of small segments, we will re-group imported files. This parameter represents the sum of file sizes in each group (each ImportTask).
    scheduleInterval: 2 # The interval for scheduling import, measured in seconds.
    checkIntervalHigh: 2 # The interval for checking import, measured in seconds, is set to a high frequency for the import checker.
    checkIntervalLow: 120 # The interval for checking import, measured in seconds, is set to a low frequency for the import checker.
    maxImportFileNumPerReq: 1024 # The maximum number of files allowed per single import request.
    waitForIndex: true # Indicates whether the import operation waits for the completion of index building.
  gracefulStopTimeout: 5 # seconds. force stop node without graceful stop
  ip: # if not specified, use the first unicastable address
  port: 13333
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 268435456
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 536870912

dataNode:
  dataSync:
    flowGraph:
      maxQueueLength: 16 # Maximum length of the task queue in the flowgraph
      maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
    maxParallelSyncMgrTasks: 256 # The max concurrent sync task number of the datanode sync mgr globally
    skipMode:
      enable: true # Support skipping some timetick messages to reduce CPU usage
      skipNum: 4 # Consume one for every n records skipped
      coldTime: 60 # Turn on skip mode after there are only timetick msgs for x seconds
  segment:
    insertBufSize: 16777216 # Max buffer size to flush for a single segment.
    deleteBufBytes: 16777216 # Max buffer size in bytes to flush deletes for a single channel, default as 16MB
    syncPeriod: 600 # The period to sync segments if the buffer is not empty.
  memory:
    forceSyncEnable: true # Set true to force sync if memory usage is too high
    forceSyncSegmentNum: 1 # number of segments to sync; segments with the largest buffers will be synced.
    checkInterval: 3000 # the interval to check datanode memory usage, in milliseconds
    forceSyncWatermark: 0.5 # memory watermark for standalone; upon reaching this watermark, segments will be synced.
  timetick:
    byRPC: true
    interval: 500
  channel:
    # specify the size of the global work pool of all channels
    # if this parameter <= 0, it will be set to the maximum number of CPUs that can be executing
    # suggest setting it bigger for large collection numbers to avoid blocking
    workPoolSize: -1
    # specify the size of the global work pool for channel checkpoint updating
    # if this parameter <= 0, it will be set to 10
    updateChannelCheckpointMaxParallel: 10
    updateChannelCheckpointInterval: 60 # the interval duration (in seconds) for the datanode to update the channel checkpoint of each channel
    updateChannelCheckpointRPCTimeout: 20 # timeout in seconds for the UpdateChannelCheckpoint RPC call
    maxChannelCheckpointsPerPRC: 128 # The maximum number of channel checkpoints per UpdateChannelCheckpoint RPC.
    channelCheckpointUpdateTickInSeconds: 10 # The frequency, in seconds, at which the channel checkpoint updater executes updates.
  import:
    maxConcurrentTaskNum: 16 # The maximum number of import/pre-import tasks allowed to run concurrently on a datanode.
    maxImportFileSizeInGB: 16 # The maximum file size (in GB) for an import file, where an import file refers to either a Row-Based file or a set of Column-Based files.
    readBufferSizeInMB: 16 # The data block size (in MB) read from the chunk manager by the datanode during import.
  compaction:
    levelZeroBatchMemoryRatio: 0.05 # The minimal memory ratio of free memory for level zero compaction executing in batch mode
  gracefulStopTimeout: 1800 # seconds. force stop node without graceful stop
  ip: # if not specified, use the first unicastable address
  port: 21124
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 268435456
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 536870912

# Configures the system log output.
log:
  level: info # Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
  file:
    rootPath: # root dir path to put logs; the default "" means no log file will be printed. please adjust in embedded Milvus: /tmp/milvus/logs
    maxSize: 300 # MB
    maxAge: 10 # Maximum time for log retention in days.
    maxBackups: 20
  format: text # text or json
  stdout: true # Stdout enable or not

grpc:
  log:
    level: WARNING
  gracefulStopTimeout: 10 # seconds, time to wait for graceful stop to finish
  client:
    compressionEnabled: false
    dialTimeout: 200
    keepAliveTime: 10000
    keepAliveTimeout: 20000
    maxMaxAttempts: 10
    initialBackoff: 0.2
    maxBackoff: 10
    minResetInterval: 1000
    maxCancelError: 32
    minSessionCheckInterval: 200

# Configure the proxy tls enable.
tls:
  serverPemPath: configs/cert/server.pem
  serverKeyPath: configs/cert/server.key
  caPemPath: configs/cert/ca.pem

common:
  defaultPartitionName: _default # default partition name for a collection
  defaultIndexName: _default_idx # default index name
  entityExpiration: -1 # Entity expiration in seconds, CAUTION -1 means never expire
  indexSliceSize: 16 # MB
  threadCoreCoefficient:
    highPriority: 10 # This parameter specifies how many times the number of threads is the number of cores in the high priority pool
    middlePriority: 5 # This parameter specifies how many times the number of threads is the number of cores in the middle priority pool
    lowPriority: 1 # This parameter specifies how many times the number of threads is the number of cores in the low priority pool
  buildIndexThreadPoolRatio: 0.75
  DiskIndex:
    MaxDegree: 56
    SearchListSize: 100
    PQCodeBudgetGBRatio: 0.125
    BuildNumThreadsRatio: 1
    SearchCacheBudgetGBRatio: 0.1
    LoadNumThreadRatio: 8
    BeamWidthRatio: 4
  gracefulTime: 5000 # milliseconds. it represents the interval (in ms) by which the request arrival time needs to be subtracted in the case of Bounded Consistency.
  gracefulStopTimeout: 1800 # seconds. it will force quit the server if the graceful stop process is not completed during this time.
  storageType: remote # please adjust in embedded Milvus: local. available values are [local, remote, opendal]; the value minio is deprecated, use remote instead
  # Default value: auto
  # Valid values: [auto, avx512, avx2, avx, sse4_2]
  # This configuration is only used by querynode and indexnode; it selects the CPU instruction set for Searching and Index-building.
  simdType: auto
  security:
    authorizationEnabled: false
    # The superusers will ignore some system check processes,
    # like the old password verification when updating the credential
    superUsers:
    tlsMode: 0
  session:
    ttl: 30 # ttl value when the session grants a lease to register the service
    retryTimes: 30 # retry times when the session sends etcd requests
  locks:
    metrics:
      enable: false # whether to gather statistics for metrics locks
    threshold:
      info: 500 # minimum milliseconds for printing durations at info level
      warn: 1000 # minimum milliseconds for printing durations at warn level
  storage:
    scheme: s3
    enablev2: false
  ttMsgEnabled: true # Whether the instance enables sending ts messages
  traceLogMode: 0 # trace request info
  bloomFilterSize: 100000 # bloom filter initial size
  maxBloomFalsePositive: 0.001 # max false positive rate for bloom filter

# QuotaConfig, configurations of Milvus quota and limits.
# By default, we enable:
#   1. TT protection;
#   2. Memory protection;
#   3. Disk quota protection.
# You can enable:
#   1. DML throughput limitation;
#   2. DDL, DQL qps/rps limitation;
#   3. DQL Queue length/latency protection;
#   4. DQL result rate protection;
# If necessary, you can also manually force to deny RW requests.
quotaAndLimits:
  enabled: true # `true` to enable quota and limits, `false` to disable.
  # quotaCenterCollectInterval is the time interval that quotaCenter
  # collects metrics from Proxies, Query cluster and Data cluster.
  # seconds, (0 ~ 65536)
  quotaCenterCollectInterval: 3
  ddl:
    enabled: false
    collectionRate: -1 # qps, default no limit, rate for CreateCollection, DropCollection, LoadCollection, ReleaseCollection
    partitionRate: -1 # qps, default no limit, rate for CreatePartition, DropPartition, LoadPartition, ReleasePartition
    db:
      collectionRate: -1 # qps of db level, default no limit, rate for CreateCollection, DropCollection, LoadCollection, ReleaseCollection
      partitionRate: -1 # qps of db level, default no limit, rate for CreatePartition, DropPartition, LoadPartition, ReleasePartition
  indexRate:
    enabled: false
    max: -1 # qps, default no limit, rate for CreateIndex, DropIndex
    db:
      max: -1 # qps of db level, default no limit, rate for CreateIndex, DropIndex
  flushRate:
    enabled: true
    max: -1 # qps, default no limit, rate for flush
    collection:
      max: 0.1 # qps, rate for flush at collection level.
    db:
      max: -1 # qps of db level, default no limit, rate for flush
  compactionRate:
    enabled: false
    max: -1 # qps, default no limit, rate for manualCompaction
    db:
      max: -1 # qps of db level, default no limit, rate for manualCompaction
  dml:
    # dml limit rates, default no limit.
    # The maximum rate will not be greater than max.
    enabled: false
    insertRate:
      max: -1 # MB/s, default no limit
      db:
        max: -1 # MB/s, default no limit
      collection:
        max: -1 # MB/s, default no limit
      partition:
        max: -1 # MB/s, default no limit
    upsertRate:
      max: -1 # MB/s, default no limit
      db:
        max: -1 # MB/s, default no limit
      collection:
        max: -1 # MB/s, default no limit
      partition:
        max: -1 # MB/s, default no limit
    deleteRate:
      max: -1 # MB/s, default no limit
      db:
        max: -1 # MB/s, default no limit
      collection:
        max: -1 # MB/s, default no limit
      partition:
        max: -1 # MB/s, default no limit
    bulkLoadRate:
      max: -1 # MB/s, default no limit, not supported yet. TODO: limit bulkLoad rate
      db:
        max: -1 # MB/s, default no limit, not supported yet. TODO: limit db bulkLoad rate
      collection:
        max: -1 # MB/s, default no limit, not supported yet. TODO: limit collection bulkLoad rate
      partition:
        max: -1 # MB/s, default no limit, not supported yet. TODO: limit partition bulkLoad rate
  dql:
    # dql limit rates, default no limit.
    # The maximum rate will not be greater than max.
    enabled: false
    searchRate:
      max: -1 # vps (vectors per second), default no limit
      db:
        max: -1 # vps (vectors per second), default no limit
      collection:
        max: -1 # vps (vectors per second), default no limit
      partition:
        max: -1 # vps (vectors per second), default no limit
    queryRate:
      max: -1 # qps, default no limit
      db:
        max: -1 # qps, default no limit
      collection:
        max: -1 # qps, default no limit
      partition:
        max: -1 # qps, default no limit
  limits:
    maxCollectionNum: 65536
    maxCollectionNumPerDB: 65536
    maxInsertSize: -1 # maximum size of a single insert request, in bytes, -1 means no limit
    maxResourceGroupNumOfQueryNode: 1024 # maximum number of resource groups of query nodes
  limitWriting:
    # forceDeny false means dml requests are allowed (except for some
    # specific conditions, such as node memory reaching the water marker), true means always reject all dml requests.
    forceDeny: false
    ttProtection:
      enabled: false
      # maxTimeTickDelay indicates the backpressure for DML Operations.
      # DML rates would be reduced according to the ratio of time tick delay to maxTimeTickDelay;
      # if the time tick delay is greater than maxTimeTickDelay, all DML requests would be rejected.
      # seconds
      maxTimeTickDelay: 300
    memProtection:
      # When memory usage > memoryHighWaterLevel, all dml requests would be rejected;
      # When memoryLowWaterLevel < memory usage < memoryHighWaterLevel, reduce the dml rate;
      # When memory usage < memoryLowWaterLevel, no action.
      enabled: true
      dataNodeMemoryLowWaterLevel: 0.85 # (0, 1], memoryLowWaterLevel in DataNodes
      dataNodeMemoryHighWaterLevel: 0.95 # (0, 1], memoryHighWaterLevel in DataNodes
      queryNodeMemoryLowWaterLevel: 0.85 # (0, 1], memoryLowWaterLevel in QueryNodes
      queryNodeMemoryHighWaterLevel: 0.95 # (0, 1], memoryHighWaterLevel in QueryNodes
    growingSegmentsSizeProtection:
      # No action will be taken if the growing segments size is less than the low watermark.
      # When the growing segments size exceeds the low watermark, the dml rate will be reduced,
      # but the rate will not be lower than minRateRatio * dmlRate.
      enabled: false
      minRateRatio: 0.5
      lowWaterLevel: 0.2
      highWaterLevel: 0.4
    diskProtection:
      enabled: true # When the total file size of object storage is greater than `diskQuota`, all dml requests would be rejected;
      diskQuota: -1 # MB, (0, +inf), default no limit
      diskQuotaPerDB: -1 # MB, (0, +inf), default no limit
      diskQuotaPerCollection: -1 # MB, (0, +inf), default no limit
      diskQuotaPerPartition: -1 # MB, (0, +inf), default no limit
  limitReading:
    # forceDeny false means dql requests are allowed (except for some
    # specific conditions, such as the collection having been dropped), true means always reject all dql requests.
    forceDeny: false
    queueProtection:
      enabled: false
      # nqInQueueThreshold indicates that the system is under backpressure for the Search/Query path.
      # If the NQ in any QueryNode's queue is greater than nqInQueueThreshold, search&query rates would gradually cool off
      # until the NQ in the queue no longer exceeds nqInQueueThreshold. We treat the NQ of a query request as 1.
      # int, default no limit
      nqInQueueThreshold: -1
      # queueLatencyThreshold indicates that the system is under backpressure for the Search/Query path.
      # If the dql latency of queuing is greater than queueLatencyThreshold, search&query rates would gradually cool off
      # until the latency of queuing no longer exceeds queueLatencyThreshold.
      # The latency here refers to the averaged latency over a period of time.
      # milliseconds, default no limit
      queueLatencyThreshold: -1
    resultProtection:
      enabled: false
      # maxReadResultRate indicates that the system is under backpressure for the Search/Query path.
      # If the dql result rate is greater than maxReadResultRate, search&query rates would gradually cool off
      # until the read result rate no longer exceeds maxReadResultRate.
      # MB/s, default no limit
      maxReadResultRate: -1
      maxReadResultRatePerDB: -1
      maxReadResultRatePerCollection: -1
      # coolOffSpeed is the speed at which search&query rates cool off.
      # (0, 1]
      coolOffSpeed: 0.9

trace:
  # trace exporter type, default is stdout,
  # optional values: ['noop', 'stdout', 'jaeger', 'otlp']
  exporter: noop
  # fraction of the traceID based sampler,
  # optional values: [0, 1]
  # Fractions >= 1 will always sample. Fractions < 0 are treated as zero.
  sampleFraction: 0
  jaeger:
    url: # when the exporter is jaeger, the jaeger URL should be set
  otlp:
    endpoint: # example: "127.0.0.1:4318"
    secure: true

# When using GPU indexing, Milvus will utilize a memory pool to avoid frequent memory allocation and deallocation.
# Here, you can set the size of the memory occupied by the memory pool, with the unit being MB.
# Note that there is a possibility of Milvus crashing when the actual memory demand exceeds the value set by maxMemSize.
# If initMemSize and maxMemSize are both set to zero,
# Milvus will automatically initialize half of the available GPU memory,
# and maxMemSize will be set to the whole available GPU memory.
gpu:
  initMemSize: # Gpu Memory Pool init size
  maxMemSize: # Gpu Memory Pool Max size
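To confirm the running container actually picked up this config rather than the image default, a simple spot-check (the container name and mount path come from the compose files in this change):

```bash
# Print the first lines of the mounted config inside the running container;
# they should match the Intel copyright header of the file above.
docker exec milvus-standalone head -n 3 /milvus/configs/milvus.yaml
```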
@@ -177,6 +177,17 @@ cd GenAIExamples/MultimodalQnA/docker_compose/intel/hpu/gaudi/
docker compose -f compose.yaml up -d
```

> Alternatively, you can run docker compose with `compose_milvus.yaml` to use the Milvus vector database:

```bash
export MILVUS_HOST=${host_ip}
export MILVUS_PORT=19530
export MILVUS_RETRIEVER_PORT=7000
export COLLECTION_NAME=mm_rag_milvus
cd GenAIExamples/MultimodalQnA/docker_compose/intel/hpu/gaudi/
docker compose -f compose_milvus.yaml up -d
```

### Validate Microservices

1. embedding-multimodal-bridgetower
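The dataprep service can be smoke-tested the same way; a hedged sketch, assuming it exposes the standard OPEA `/v1/dataprep/ingest` route on `${DATAPREP_MMR_PORT}` (the route and the sample file are assumptions, not confirmed by this diff):

```bash
# Hypothetical ingest request with a local sample video.
curl -X POST "http://${host_ip}:${DATAPREP_MMR_PORT}/v1/dataprep/ingest" \
     -H "Content-Type: multipart/form-data" \
     -F "files=@./example.mp4"
```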
MultimodalQnA/docker_compose/intel/hpu/gaudi/compose_milvus.yaml (new file, 256 lines)
@@ -0,0 +1,256 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

services:
  whisper-service:
    image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
    container_name: whisper-service
    ports:
      - "${WHISPER_PORT}:${WHISPER_PORT}"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      WHISPER_PORT: ${WHISPER_PORT}
      WHISPER_SERVER_ENDPOINT: ${WHISPER_SERVER_ENDPOINT}
    restart: unless-stopped
  milvus-etcd:
    container_name: milvus-etcd
    image: quay.io/coreos/etcd:v3.5.5
    environment:
      - ETCD_AUTO_COMPACTION_MODE=revision
      - ETCD_AUTO_COMPACTION_RETENTION=1000
      - ETCD_QUOTA_BACKEND_BYTES=4294967296
      - ETCD_SNAPSHOT_COUNT=50000
    volumes:
      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/etcd:/etcd
    command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
    healthcheck:
      test: ["CMD", "etcdctl", "endpoint", "health"]
      interval: 30s
      timeout: 20s
      retries: 3
  milvus-minio:
    container_name: milvus-minio
    image: minio/minio:RELEASE.2023-03-20T20-16-18Z
    environment:
      MINIO_ACCESS_KEY: minioadmin
      MINIO_SECRET_KEY: minioadmin
    ports:
      - "${MINIO_PORT1:-5044}:9001"
      - "${MINIO_PORT2:-5043}:9000"
    volumes:
      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/minio:/minio_data
    command: minio server /minio_data --console-address ":9001"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
  milvus-standalone:
    container_name: milvus-standalone
    image: milvusdb/milvus:v2.4.6
    command: ["milvus", "run", "standalone"]
    security_opt:
      - seccomp:unconfined
    environment:
      ETCD_ENDPOINTS: milvus-etcd:2379
      MINIO_ADDRESS: milvus-minio:9000
    volumes:
      - ${DOCKER_VOLUME_DIRECTORY:-.}/config/milvus.yaml:/milvus/configs/milvus.yaml
      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
      interval: 30s
      start_period: 90s
      timeout: 20s
      retries: 3
    ports:
      - "19530:19530"
      - "${MILVUS_STANDALONE_PORT:-9091}:9091"
    depends_on:
      - "milvus-etcd"
      - "milvus-minio"
  dataprep-multimodal-milvus:
    image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}
    container_name: dataprep-multimodal-milvus
    depends_on:
      - "milvus-standalone"
      - "lvm"
    ports:
      - "${DATAPREP_MMR_PORT}:5000"
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      MULTIMODAL_DATAPREP: true
      DATAPREP_COMPONENT_NAME: "OPEA_DATAPREP_MULTIMODALMILVUS"
      MILVUS_HOST: ${MILVUS_HOST}
      COLLECTION_NAME: ${COLLECTION_NAME}
      LVM_ENDPOINT: "http://${LVM_SERVICE_HOST_IP}:${LVM_PORT}/v1/lvm"
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
  embedding-multimodal-bridgetower-gaudi:
    image: ${REGISTRY:-opea}/embedding-multimodal-bridgetower-gaudi:${TAG:-latest}
    container_name: embedding-multimodal-bridgetower-gaudi
    ports:
      - ${EMM_BRIDGETOWER_PORT}:${EMM_BRIDGETOWER_PORT}
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      PORT: ${EMM_BRIDGETOWER_PORT}
      HABANA_VISIBLE_DEVICES: all
    runtime: habana
    cap_add:
      - SYS_NICE
    healthcheck:
      test: ["CMD-SHELL", "http_proxy='' curl -f http://localhost:${EMM_BRIDGETOWER_PORT}/v1/health_check"]
      interval: 10s
      timeout: 6s
      retries: 18
      start_period: 30s
    entrypoint: ["python", "bridgetower_server.py", "--device", "hpu", "--model_name_or_path", $EMBEDDING_MODEL_ID]
    restart: unless-stopped
  embedding:
    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
    container_name: embedding
    depends_on:
      embedding-multimodal-bridgetower-gaudi:
        condition: service_healthy
    ports:
      - ${MM_EMBEDDING_PORT_MICROSERVICE}:${MM_EMBEDDING_PORT_MICROSERVICE}
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      MMEI_EMBEDDING_ENDPOINT: ${MMEI_EMBEDDING_ENDPOINT}
      MM_EMBEDDING_PORT_MICROSERVICE: ${MM_EMBEDDING_PORT_MICROSERVICE}
      EMM_BRIDGETOWER_PORT: ${EMM_BRIDGETOWER_PORT}
      MULTIMODAL_EMBEDDING: true
    restart: unless-stopped
  retriever-milvus:
    image: ${REGISTRY:-opea}/retriever:${TAG:-latest}
    container_name: retriever-milvus
    depends_on:
      - milvus-standalone
    ports:
      - "${MILVUS_RETRIEVER_PORT}:${MILVUS_RETRIEVER_PORT}"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      MILVUS_HOST: ${host_ip}
      BRIDGE_TOWER_EMBEDDING: ${BRIDGE_TOWER_EMBEDDING}
      LOGFLAG: ${LOGFLAG}
      RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_MILVUS"
      COLLECTION_NAME: ${COLLECTION_NAME}
    restart: unless-stopped
  tgi-gaudi:
    image: ghcr.io/huggingface/tgi-gaudi:2.3.1
    container_name: tgi-llava-gaudi-server
    ports:
      - ${TGI_GAUDI_PORT}
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      TGI_GAUDI_PORT: ${TGI_GAUDI_PORT}
      LLAVA_SERVER_PORT: ${LLAVA_SERVER_PORT}
      LVM_PORT: ${LVM_PORT}
      HF_HUB_DISABLE_PROGRESS_BARS: 1
      HF_HUB_ENABLE_HF_TRANSFER: 0
      HABANA_VISIBLE_DEVICES: all
      OMPI_MCA_btl_vader_single_copy_mechanism: none
      PREFILL_BATCH_BUCKET_SIZE: 1
      BATCH_BUCKET_SIZE: 1
      MAX_BATCH_TOTAL_TOKENS: 4096
      ENABLE_HPU_GRAPH: true
      LIMIT_HPU_GRAPH: true
      USE_FLASH_ATTENTION: true
      FLASH_ATTENTION_RECOMPUTE: true
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://${host_ip}:8399/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 100
    runtime: habana
    cap_add:
      - SYS_NICE
    ipc: host
    command: --model-id ${LVM_MODEL_ID} --max-input-tokens 3048 --max-total-tokens 4096
    restart: unless-stopped
  lvm:
    image: ${REGISTRY:-opea}/lvm:${TAG:-latest}
    container_name: lvm
    depends_on:
      - tgi-gaudi
    ports:
      - "${LVM_PORT}:${LVM_PORT}"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      LVM_COMPONENT_NAME: "OPEA_TGI_LLAVA_LVM"
      LVM_ENDPOINT: ${LVM_ENDPOINT}
      LLAVA_SERVER_PORT: ${LLAVA_SERVER_PORT}
      LVM_PORT: ${LVM_PORT}
      HF_HUB_DISABLE_PROGRESS_BARS: 1
      HF_HUB_ENABLE_HF_TRANSFER: 0
      MAX_IMAGES: ${MAX_IMAGES:-1}
    restart: unless-stopped
  multimodalqna:
    image: ${REGISTRY:-opea}/multimodalqna:${TAG:-latest}
    container_name: multimodalqna-backend-server
    depends_on:
      - milvus-standalone
      - dataprep-multimodal-milvus
      - embedding
      - retriever-milvus
      - lvm
    ports:
      - "${MEGA_SERVICE_PORT}:${MEGA_SERVICE_PORT}"
    environment:
      no_proxy: ${no_proxy}
      https_proxy: ${https_proxy}
      http_proxy: ${http_proxy}
      MEGA_SERVICE_HOST_IP: ${MEGA_SERVICE_HOST_IP}
      MEGA_SERVICE_PORT: ${MEGA_SERVICE_PORT}
      MM_EMBEDDING_SERVICE_HOST_IP: ${MM_EMBEDDING_SERVICE_HOST_IP}
      MM_EMBEDDING_PORT_MICROSERVICE: ${MM_EMBEDDING_PORT_MICROSERVICE}
      MM_RETRIEVER_SERVICE_HOST_IP: ${MM_RETRIEVER_SERVICE_HOST_IP}
      LVM_SERVICE_HOST_IP: ${LVM_SERVICE_HOST_IP}
      LVM_MODEL_ID: ${LVM_MODEL_ID}
      WHISPER_PORT: ${WHISPER_PORT}
      WHISPER_SERVER_ENDPOINT: ${WHISPER_SERVER_ENDPOINT}
    ipc: host
    restart: always
  multimodalqna-ui:
    image: ${REGISTRY:-opea}/multimodalqna-ui:${TAG:-latest}
    container_name: multimodalqna-gradio-ui-server
    depends_on:
      - multimodalqna
    ports:
      - "${UI_PORT}:${UI_PORT}"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - BACKEND_SERVICE_ENDPOINT=${BACKEND_SERVICE_ENDPOINT}
      - DATAPREP_INGEST_SERVICE_ENDPOINT=${DATAPREP_INGEST_SERVICE_ENDPOINT}
      - DATAPREP_GEN_TRANSCRIPT_SERVICE_ENDPOINT=${DATAPREP_GEN_TRANSCRIPT_SERVICE_ENDPOINT}
      - DATAPREP_GEN_CAPTION_SERVICE_ENDPOINT=${DATAPREP_GEN_CAPTION_SERVICE_ENDPOINT}
      - MEGA_SERVICE_PORT=${MEGA_SERVICE_PORT}
      - UI_PORT=${UI_PORT}
      - DATAPREP_MMR_PORT=${DATAPREP_MMR_PORT}
    ipc: host
    restart: always

networks:
  default:
    driver: bridge
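> Note: this compose file references several variables without defaults. A minimal sketch of the exports needed before `docker compose -f compose_milvus.yaml up -d` on Gaudi; the values mirror the xeon test script below and are illustrative, and the full variable list is visible in the compose file itself:

```bash
export host_ip=$(hostname -I | awk '{print $1}')
export WHISPER_PORT=7066
export DATAPREP_MMR_PORT=6007
export EMM_BRIDGETOWER_PORT=6006
export MM_EMBEDDING_PORT_MICROSERVICE=6000
export MILVUS_HOST=${host_ip}
export MILVUS_RETRIEVER_PORT=7000
export LVM_PORT=9399
export LLAVA_SERVER_PORT=8399
export TGI_GAUDI_PORT=8399   # assumed here; pick the port you want TGI to expose
export MEGA_SERVICE_PORT=8888
export UI_PORT=5173
```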
811
MultimodalQnA/docker_compose/intel/hpu/gaudi/config/milvus.yaml
Normal file
@@ -0,0 +1,811 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Related configuration of etcd, used to store Milvus metadata & service discovery.
etcd:
  endpoints: localhost:2379
  rootPath: by-dev # The root path where data is stored in etcd
  metaSubPath: meta # metaRootPath = rootPath + '/' + metaSubPath
  kvSubPath: kv # kvRootPath = rootPath + '/' + kvSubPath
  log:
    level: info # Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
    # path is one of:
    #  - "default" as os.Stderr,
    #  - "stderr" as os.Stderr,
    #  - "stdout" as os.Stdout,
    #  - file path to append server logs to.
    # please adjust in embedded Milvus: /tmp/milvus/logs/etcd.log
    path: stdout
  ssl:
    enabled: false # Whether to support ETCD secure connection mode
    tlsCert: /path/to/etcd-client.pem # path to your cert file
    tlsKey: /path/to/etcd-client-key.pem # path to your key file
    tlsCACert: /path/to/ca.pem # path to your CACert file
    # TLS min version
    # Optional values: 1.0, 1.1, 1.2, 1.3.
    # We recommend using version 1.2 and above.
    tlsMinVersion: 1.3
  requestTimeout: 10000 # Etcd operation timeout in milliseconds
  use:
    embed: false # Whether to enable embedded Etcd (an in-process EtcdServer).
  data:
    dir: default.etcd # Embedded Etcd only. please adjust in embedded Milvus: /tmp/milvus/etcdData/
  auth:
    enabled: false # Whether to enable authentication
    userName: # username for etcd authentication
    password: # password for etcd authentication

metastore:
  type: etcd # Default value: etcd, Valid values: [etcd, tikv]

# Related configuration of tikv, used to store Milvus metadata.
# Notice that when TiKV is enabled for metastore, you still need to have etcd for service discovery.
# TiKV is a good option when the metadata size requires better horizontal scalability.
tikv:
  endpoints: 127.0.0.1:2389 # Note that the default pd port of tikv is 2379, which conflicts with etcd.
  rootPath: by-dev # The root path where data is stored in tikv
  metaSubPath: meta # metaRootPath = rootPath + '/' + metaSubPath
  kvSubPath: kv # kvRootPath = rootPath + '/' + kvSubPath
  requestTimeout: 10000 # ms, tikv request timeout
  snapshotScanSize: 256 # batch size of tikv snapshot scan
  ssl:
    enabled: false # Whether to support TiKV secure connection mode
    tlsCert: # path to your cert file
    tlsKey: # path to your key file
    tlsCACert: # path to your CACert file

localStorage:
  path: /var/lib/milvus/data/ # please adjust in embedded Milvus: /tmp/milvus/data/

# Related configuration of MinIO/S3/GCS or any other service that supports the S3 API, which is responsible for data persistence for Milvus.
# We refer to the storage service as MinIO/S3 in the following description for simplicity.
minio:
  address: localhost # Address of MinIO/S3
  port: 9000 # Port of MinIO/S3
  accessKeyID: minioadmin # accessKeyID of MinIO/S3
  secretAccessKey: minioadmin # MinIO/S3 encryption string
  useSSL: false # Access to MinIO/S3 with SSL
  ssl:
    tlsCACert: /path/to/public.crt # path to your CACert file
  bucketName: a-bucket # Bucket name in MinIO/S3
  rootPath: files # The root path where the message is stored in MinIO/S3
  # Whether to use an IAM role to access S3/GCS instead of access/secret keys
  # For more information, refer to
  # aws: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
  # gcp: https://cloud.google.com/storage/docs/access-control/iam
  # aliyun (ack): https://www.alibabacloud.com/help/en/container-service-for-kubernetes/latest/use-rrsa-to-enforce-access-control
  # aliyun (ecs): https://www.alibabacloud.com/help/en/elastic-compute-service/latest/attach-an-instance-ram-role
  useIAM: false
  # Cloud Provider of S3. Supports: "aws", "gcp", "aliyun".
  # You can use "aws" for any other cloud provider that supports the S3 API with signature v4, e.g.: minio
  # You can use "gcp" for any other cloud provider that supports the S3 API with signature v2
  # You can use "aliyun" for any other cloud provider that uses virtual-host-style buckets
  # When useIAM is enabled, only "aws", "gcp", "aliyun" are supported for now
  cloudProvider: aws
  # Custom endpoint for fetching IAM role credentials, when useIAM is true & cloudProvider is "aws".
  # Leave it empty if you want to use the AWS default endpoint
  iamEndpoint:
  logLevel: fatal # Log level for aws sdk log. Supported level: off, fatal, error, warn, info, debug, trace
  region: # Specify minio storage system location region
  useVirtualHost: false # Whether to use virtual host mode for bucket
  requestTimeoutMs: 10000 # minio timeout for request time in milliseconds
  # The maximum number of objects requested per batch in minio ListObjects rpc,
  # 0 means using the oss client default; decrease this configuration if ListObjects times out
  listObjectsMaxKeys: 0

# Milvus supports four MQs: rocksmq (based on RocksDB), natsmq (embedded nats-server), Pulsar and Kafka.
# You can change your mq by setting the mq.type field.
# If mq.type is left as default and multiple MQs are configured in this file, the priority is:
# 1. standalone(local) mode: rocksmq(default) > natsmq > Pulsar > Kafka
# 2. cluster mode: Pulsar(default) > Kafka (rocksmq and natsmq are unsupported in cluster mode)
mq:
  # Default value: "default"
  # Valid values: [default, pulsar, kafka, rocksmq, natsmq]
  type: default
  enablePursuitMode: true # Default value: "true"
  pursuitLag: 10 # time tick lag threshold to enter pursuit mode, in seconds
  pursuitBufferSize: 8388608 # pursuit mode buffer size in bytes
  mqBufSize: 16 # MQ client consumer buffer length
  dispatcher:
    mergeCheckInterval: 1 # the interval time (in seconds) for the dispatcher to check whether to merge
    targetBufSize: 16 # the length of the channel buffer for the target
    maxTolerantLag: 3 # Default value: "3", the timeout (in seconds) that the target sends msgPack

# Related configuration of pulsar, used to manage Milvus logs of recent mutation operations, output streaming log, and provide log publish-subscribe services.
pulsar:
  address: localhost # Address of pulsar
  port: 6650 # Port of Pulsar
  webport: 80 # Web port of pulsar, if you connect directly without proxy, should use 8080
  maxMessageSize: 5242880 # 5 * 1024 * 1024 Bytes, Maximum size of each message in pulsar.
  tenant: public
  namespace: default
  requestTimeout: 60 # pulsar client global request timeout in seconds
  enableClientMetrics: false # Whether to register pulsar client metrics into the milvus metrics path.

# If you want to enable kafka, you need to comment out the pulsar configs
# kafka:
#   brokerList:
#   saslUsername:
#   saslPassword:
#   saslMechanisms:
#   securityProtocol:
#   ssl:
#     enabled: false # whether to enable ssl mode
#     tlsCert: # path to client's public key (PEM) used for authentication
#     tlsKey: # path to client's private key (PEM) used for authentication
#     tlsCaCert: # file or directory path to CA certificate(s) for verifying the broker's key
#     tlsKeyPassword: # private key passphrase for use with ssl.key.location and set_ssl_cert(), if any
#   readTimeout: 10

rocksmq:
  # The path where the message is stored in rocksmq
  # please adjust in embedded Milvus: /tmp/milvus/rdb_data
  path: /var/lib/milvus/rdb_data
  lrucacheratio: 0.06 # rocksdb cache memory ratio
  rocksmqPageSize: 67108864 # 64 MB, 64 * 1024 * 1024 bytes, The size of each page of messages in rocksmq
  retentionTimeInMinutes: 4320 # 3 days, 3 * 24 * 60 minutes, The retention time of the message in rocksmq.
  retentionSizeInMB: 8192 # 8 GB, 8 * 1024 MB, The retention size of the message in rocksmq.
  compactionInterval: 86400 # 1 day, trigger rocksdb compaction every day to remove deleted data
  compressionTypes: 0,0,7,7,7 # compaction compression type, only supports 0 and 7. 0 means no compression, 7 uses zstd. The number of types equals the number of rocksdb levels.

# natsmq configuration.
# more detail: https://docs.nats.io/running-a-nats-service/configuration
natsmq:
  server:
    port: 4222 # Port for nats server listening
    storeDir: /var/lib/milvus/nats # Directory to use for JetStream storage of nats
    maxFileStore: 17179869184 # Maximum size of the 'file' storage
    maxPayload: 8388608 # Maximum number of bytes in a message payload
    maxPending: 67108864 # Maximum number of bytes buffered for a connection; applies to client connections
    initializeTimeout: 4000 # timeout (ms) to wait for natsmq initialization to finish
    monitor:
      trace: false # If true enable protocol trace log messages
      debug: false # If true enable debug log messages
      logTime: true # If set to false, log without timestamps.
      logFile: /tmp/milvus/logs/nats.log # Log file path, relative to .. of the milvus binary if a relative path is used
      logSizeLimit: 536870912 # Size in bytes after which the log file rolls over to a new one
    retention:
      maxAge: 4320 # Maximum age of any message in the P-channel
      maxBytes: # How many bytes the single P-channel may contain. Removes oldest messages if the P-channel exceeds this size
      maxMsgs: # How many messages the single P-channel may contain. Removes oldest messages if the P-channel exceeds this limit

# Related configuration of rootCoord, used to handle data definition language (DDL) and data control language (DCL) requests
rootCoord:
  dmlChannelNum: 16 # The number of dml channels created at system startup
  maxPartitionNum: 1024 # Maximum number of partitions in a collection
  minSegmentSizeToEnableIndex: 1024 # It's a threshold. When the segment size is less than this value, the segment will not be indexed
  enableActiveStandby: false
  maxDatabaseNum: 64 # Maximum number of databases
  maxGeneralCapacity: 65536 # upper limit for the sum of the products of partitionNumber and shardNumber
  gracefulStopTimeout: 5 # seconds. force stop node without graceful stop
  ip: # if not specified, use the first unicastable address
  port: 53100
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 268435456
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 536870912

# Related configuration of proxy, used to validate client requests and reduce the returned results.
proxy:
  timeTickInterval: 200 # ms, the interval at which the proxy synchronizes the time tick
  healthCheckTimeout: 3000 # ms, the interval for the component health check
  msgStream:
    timeTick:
      bufSize: 512
  maxNameLength: 255 # Maximum length of name for a collection or alias
  # Maximum number of fields in a collection.
  # As of today (2.2.0 and after) it is strongly DISCOURAGED to set maxFieldNum >= 64.
  # So adjust at your risk!
  maxFieldNum: 64
  maxVectorFieldNum: 4 # Maximum number of vector fields in a collection.
  maxShardNum: 16 # Maximum number of shards in a collection
  maxDimension: 32768 # Maximum dimension of a vector
  # Whether to produce gin logs.
  # please adjust in embedded Milvus: false
  ginLogging: true
  ginLogSkipPaths: / # skip url path for gin log
  maxTaskNum: 1024 # max task number of proxy task queue
  mustUsePartitionKey: false # switch for whether the proxy must use a partition key for the collection
  accessLog:
    enable: false # whether to use access log
    minioEnable: false # whether to upload sealed access log files to minio
    localPath: /tmp/milvus_access
    filename: # Log filename, leave empty to use stdout.
    maxSize: 64 # Max size for a single file, in MB.
    cacheSize: 10240 # Size of log memory cache, in B
    rotatedTime: 0 # Max time for single access log file in seconds
    remotePath: access_log/ # File path in minIO
    remoteMaxTime: 0 # Max time for log file in minIO, in hours
    formatters:
      base:
        format: "[$time_now] [ACCESS] <$user_name: $user_addr> $method_name [status: $method_status] [code: $error_code] [sdk: $sdk_version] [msg: $error_msg] [traceID: $trace_id] [timeCost: $time_cost]"
      query:
        format: "[$time_now] [ACCESS] <$user_name: $user_addr> $method_name [status: $method_status] [code: $error_code] [sdk: $sdk_version] [msg: $error_msg] [traceID: $trace_id] [timeCost: $time_cost] [database: $database_name] [collection: $collection_name] [partitions: $partition_name] [expr: $method_expr]"
        methods: "Query,Search,Delete"
  connectionCheckIntervalSeconds: 120 # the interval time (in seconds) for the connection manager to scan inactive client info
  connectionClientInfoTTLSeconds: 86400 # inactive client info TTL duration, in seconds
  maxConnectionNum: 10000 # the max number of client infos that the proxy should manage, to avoid too many client infos
  gracefulStopTimeout: 30 # seconds. force stop node without graceful stop
  slowQuerySpanInSeconds: 5 # a query whose execution time exceeds `slowQuerySpanInSeconds` is considered slow, in seconds.
  http:
    enabled: true # Whether to enable the http server
    debug_mode: false # Whether to enable http server debug mode
    port: # high-level restful api
    acceptTypeAllowInt64: true # high-level restful api, whether the http client can deal with int64
    enablePprof: true # Whether to enable pprof middleware on the metrics port
  ip: # if not specified, use the first unicastable address
  port: 19530
  internalPort: 19529
  grpc:
    serverMaxSendSize: 268435456
    serverMaxRecvSize: 67108864
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 67108864

# Related configuration of queryCoord, used to manage topology and load balancing for the query nodes, and handoff from growing segments to sealed segments.
queryCoord:
  taskMergeCap: 1
  taskExecutionCap: 256
  autoHandoff: true # Enable auto handoff
  autoBalance: true # Enable auto balance
  autoBalanceChannel: true # Enable auto balance channel
  balancer: ScoreBasedBalancer # auto balancer used for segments on queryNodes
  globalRowCountFactor: 0.1 # the weight used when balancing segments among queryNodes
  scoreUnbalanceTolerationFactor: 0.05 # the least value for the unbalanced extent between from and to nodes when doing balance
  reverseUnBalanceTolerationFactor: 1.3 # the largest value for the unbalanced extent between from and to nodes after doing balance
  overloadedMemoryThresholdPercentage: 90 # The threshold percentage at which memory is considered overloaded
  balanceIntervalSeconds: 60
  memoryUsageMaxDifferencePercentage: 30
  rowCountFactor: 0.4 # the row count weight used when balancing segments among queryNodes
  segmentCountFactor: 0.4 # the segment count weight used when balancing segments among queryNodes
  globalSegmentCountFactor: 0.1 # the segment count weight used when balancing segments among queryNodes
  segmentCountMaxSteps: 50 # segment count based plan generator max steps
  rowCountMaxSteps: 50 # row count based plan generator max steps
  randomMaxSteps: 10 # random plan generator max steps
  growingRowCountWeight: 4 # the memory weight of growing segment row count
  balanceCostThreshold: 0.001 # the threshold of balance cost, if the difference of the cluster's cost after executing the balance plan is less than this value, the plan will not be executed
  checkSegmentInterval: 1000
  checkChannelInterval: 1000
  checkBalanceInterval: 10000
  checkIndexInterval: 10000
  channelTaskTimeout: 60000 # 1 minute
  segmentTaskTimeout: 120000 # 2 minutes
  distPullInterval: 500
  collectionObserverInterval: 200
  checkExecutedFlagInterval: 100
  heartbeatAvailableInterval: 10000 # 10s, Only QueryNodes which fetched heartbeats within the duration are available
  loadTimeoutSeconds: 600
  distRequestTimeout: 5000 # the request timeout for querycoord fetching data distribution from querynodes, in milliseconds
  heatbeatWarningLag: 5000 # the lag value at which querycoord reports a warning when the last heartbeat is too old, in milliseconds
  checkHandoffInterval: 5000
  enableActiveStandby: false
  checkInterval: 1000
  checkHealthInterval: 3000 # 3s, the interval at which query coord tries to check the health of query nodes
  checkHealthRPCTimeout: 2000 # 2000ms, the timeout of the check-health rpc to query node
  brokerTimeout: 5000 # 5000ms, querycoord broker rpc timeout
  collectionRecoverTimes: 3 # if collection recover times reach the limit during loading state, release it
  observerTaskParallel: 16 # the parallel observer dispatcher task number
  checkAutoBalanceConfigInterval: 10 # the interval for checking the auto balance config
  checkNodeSessionInterval: 60 # the interval (in seconds) for checking the querynode cluster session
  gracefulStopTimeout: 5 # seconds. force stop node without graceful stop
  enableStoppingBalance: true # whether to enable stopping balance
  channelExclusiveNodeFactor: 4 # the minimum node number for enabling a channel's exclusive mode
  cleanExcludeSegmentInterval: 60 # the time duration (in seconds) for cleaning pipeline-excluded segments, which are used for filtering invalid data
  ip: # if not specified, use the first unicastable address
  port: 19531
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 268435456
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 536870912

# Related configuration of queryNode, used to run hybrid search between vector and scalar data.
queryNode:
  stats:
    publishInterval: 1000 # Interval for querynode to report node information (milliseconds)
  segcore:
    knowhereThreadPoolNumRatio: 4 # The number of threads in knowhere's thread pool. If disk is enabled, the pool size will multiply with knowhereThreadPoolNumRatio([1, 32]).
    chunkRows: 128 # The number of vectors in a chunk.
    interimIndex:
      enableIndex: true # Enable segment build with index to accelerate vector search when segment is in growing or binlog.
      nlist: 128 # temp index nlist, recommended to set to sqrt(chunkRows); must be smaller than chunkRows/8
      nprobe: 16 # nprobe to search small index, based on your accuracy requirement; must be smaller than nlist
      memExpansionRate: 1.15 # extra memory needed by building the interim index
      buildParallelRate: 0.5 # the ratio of interim-index build parallelism matched with the cpu num
    knowhereScoreConsistency: false # Enable knowhere strong consistency score computation logic
  loadMemoryUsageFactor: 1 # The multiplier used when calculating the memory usage while loading segments
  enableDisk: false # enable querynode to load disk index, and search on disk index
  maxDiskUsagePercentage: 95
  cache:
    enabled: true
    memoryLimit: 2147483648 # 2 GB, 2 * 1024 * 1024 * 1024
    readAheadPolicy: willneed # The read ahead policy of chunk cache, options: `normal, random, sequential, willneed, dontneed`
    # options: async, sync, disable.
    # Specifies the necessity for warming up the chunk cache.
    # 1. If set to "sync" or "async" the original vector data will be synchronously/asynchronously loaded into the
    # chunk cache during the load process. This approach has the potential to substantially reduce query/search latency
    # for a specific duration post-load, albeit accompanied by a concurrent increase in disk usage;
    # 2. If set to "disable" original vector data will only be loaded into the chunk cache during search/query.
    warmup: disable
  mmap:
    mmapEnabled: false # Enable mmap for loading data
  lazyload:
    enabled: false # Enable lazyload for loading data
    waitTimeout: 30000 # max wait timeout duration in milliseconds before starting lazyload search and retrieval
    requestResourceTimeout: 5000 # max timeout in milliseconds for waiting request resource for lazy load, 5s by default
    requestResourceRetryInterval: 2000 # retry interval in milliseconds for waiting request resource for lazy load, 2s by default
    maxRetryTimes: 1 # max retry times for lazy load, 1 by default
    maxEvictPerRetry: 1 # max evict count for lazy load, 1 by default
  grouping:
    enabled: true
    maxNQ: 1000
    topKMergeRatio: 20
  scheduler:
    receiveChanSize: 10240
    unsolvedQueueSize: 10240
    # maxReadConcurrentRatio is the concurrency ratio of read tasks (search tasks and query tasks).
    # Max read concurrency would be the value of hardware.GetCPUNum * maxReadConcurrentRatio.
    # It defaults to 2.0, which means max read concurrency would be the value of hardware.GetCPUNum * 2.
    # Max read concurrency must be greater than or equal to 1, and less than or equal to hardware.GetCPUNum * 100.
    # (0, 100]
    maxReadConcurrentRatio: 1
    cpuRatio: 10 # ratio used to estimate read task cpu usage.
    maxTimestampLag: 86400
    scheduleReadPolicy:
      # fifo: A FIFO queue supports the schedule.
      # user-task-polling:
      #     The user's tasks will be polled one by one and scheduled.
      #     Scheduling is fair on task granularity.
      #     The policy is based on the username for authentication.
      #     And an empty username is considered the same user.
      #     When there are no multiple users, the policy decays into FIFO.
      name: fifo
      taskQueueExpire: 60 # Controls how long (in seconds) the queue is retained after it becomes empty
      enableCrossUserGrouping: false # Enable cross-user grouping when using the user-task-polling policy. (Disable it if users' tasks cannot be merged with each other)
      maxPendingTaskPerUser: 1024 # Max pending tasks per user in the scheduler
  dataSync:
    flowGraph:
      maxQueueLength: 16 # Maximum length of task queue in flowgraph
      maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
  enableSegmentPrune: false # use the partition prune function on the shard delegator
  ip: # if not specified, use the first unicastable address
  port: 21123
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 268435456
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 536870912

indexCoord:
  bindIndexNodeMode:
    enable: false
    address: localhost:22930
    withCred: false
    nodeID: 0
  segment:
    minSegmentNumRowsToEnableIndex: 1024 # It's a threshold. When the segment num rows is less than this value, the segment will not be indexed

indexNode:
  scheduler:
    buildParallel: 1
  enableDisk: true # enable index node to build disk vector index
  maxDiskUsagePercentage: 95
  ip: # if not specified, use the first unicastable address
  port: 21121
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 268435456
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 536870912

dataCoord:
  channel:
    watchTimeoutInterval: 300 # Timeout on watching channels (in seconds). Datanode tickler updates of watch progress reset the timeout timer.
    balanceWithRpc: true # Whether to enable balance with RPC; defaults to using etcd watch
    legacyVersionWithoutRPCWatch: 2.4.1 # Datanodes <= this version are considered legacy nodes, which don't have rpc based watch(). This is only used during rolling upgrade where legacy nodes won't get new channels
    balanceSilentDuration: 300 # The duration after which the channel manager starts background channel balancing
    balanceInterval: 360 # The interval at which the channel manager checks dml channel balance status
    checkInterval: 1 # The interval in seconds at which the channel manager advances channel states
    notifyChannelOperationTimeout: 5 # Timeout notifying channel operations (in seconds).
  segment:
    maxSize: 1024 # Maximum size of a segment in MB
    diskSegmentMaxSize: 2048 # Maximum size of a segment in MB for a collection which has a Disk index
    sealProportion: 0.12
    assignmentExpiration: 2000 # The time of the assignment expiration in ms
    allocLatestExpireAttempt: 200 # The time attempting to alloc latest lastExpire from rootCoord after restart
    maxLife: 86400 # The max lifetime of a segment in seconds, 24*60*60
    # If a segment didn't accept dml records in maxIdleTime and the size of segment is greater than
    # minSizeFromIdleToSealed, Milvus will automatically seal it.
    # The max idle time of segment in seconds, 10*60.
    maxIdleTime: 600
    minSizeFromIdleToSealed: 16 # The min size in MB of segment which can be idle from sealed.
    # The max number of binlog files for one segment; the segment will be sealed if
    # the number of binlog files reaches the max value.
    maxBinlogFileNumber: 32
    smallProportion: 0.5 # The segment is considered as "small segment" when its # of rows is smaller than
    # (smallProportion * segment max # of rows).
    # A compaction will happen on small segments if the segment after compaction will have
    compactableProportion: 0.85
    # over (compactableProportion * segment max # of rows) rows.
    # MUST BE GREATER THAN OR EQUAL TO <smallProportion>!!!
    # During compaction, the size of segment # of rows is able to exceed segment max # of rows by (expansionRate-1) * 100%.
    expansionRate: 1.25
  autoUpgradeSegmentIndex: false # whether to auto-upgrade the segment index to the index engine's version
  enableCompaction: true # Enable data segment compaction
  compaction:
    enableAutoCompaction: true
    indexBasedCompaction: true
    rpcTimeout: 10
    maxParallelTaskNum: 10
    workerMaxParallelTaskNum: 2
    levelzero:
      forceTrigger:
        minSize: 8388608 # The minimum size in bytes to force trigger a LevelZero Compaction, default as 8MB
        maxSize: 67108864 # The maximum size in bytes to force trigger a LevelZero Compaction, default as 64MB
        deltalogMinNum: 10 # The minimum number of deltalog files to force trigger a LevelZero Compaction
        deltalogMaxNum: 30 # The maximum number of deltalog files to force trigger a LevelZero Compaction, default as 30
  enableGarbageCollection: true
  gc:
    interval: 3600 # gc interval in seconds
    missingTolerance: 86400 # file meta missing tolerance duration in seconds, default to 24hr(1d)
    dropTolerance: 10800 # tolerance duration in seconds for files that belong to a dropped entity
    removeConcurrent: 32 # number of concurrent goroutines to remove dropped s3 objects
    scanInterval: 168 # garbage collection scan residue interval in hours
  enableActiveStandby: false
  brokerTimeout: 5000 # 5000ms, dataCoord broker rpc timeout
  autoBalance: true # Enable auto balance
  checkAutoBalanceConfigInterval: 10 # the interval for checking the auto balance config
  import:
    filesPerPreImportTask: 2 # The maximum number of files allowed per pre-import task.
    taskRetention: 10800 # The retention period in seconds for tasks in the Completed or Failed state.
    maxSizeInMBPerImportTask: 6144 # To prevent the generation of small segments, we will re-group imported files. This parameter represents the sum of file sizes in each group (each ImportTask).
    scheduleInterval: 2 # The interval for scheduling import, measured in seconds.
    checkIntervalHigh: 2 # The interval for checking import, measured in seconds, is set to a high frequency for the import checker.
    checkIntervalLow: 120 # The interval for checking import, measured in seconds, is set to a low frequency for the import checker.
    maxImportFileNumPerReq: 1024 # The maximum number of files allowed per single import request.
    waitForIndex: true # Indicates whether the import operation waits for the completion of index building.
  gracefulStopTimeout: 5 # seconds. force stop node without graceful stop
  ip: # if not specified, use the first unicastable address
  port: 13333
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 268435456
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 536870912

dataNode:
  dataSync:
    flowGraph:
      maxQueueLength: 16 # Maximum length of task queue in flowgraph
      maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
    maxParallelSyncMgrTasks: 256 # The max concurrent sync task number of datanode sync mgr globally
    skipMode:
      enable: true # Support skipping some timetick messages to reduce CPU usage
      skipNum: 4 # Consume one for every n records skipped
      coldTime: 60 # Turn on skip mode after there are only timetick msgs for x seconds
  segment:
    insertBufSize: 16777216 # Max buffer size to flush for a single segment.
    deleteBufBytes: 16777216 # Max buffer size in bytes to flush del for a single channel, default as 16MB
    syncPeriod: 600 # The period to sync segments if the buffer is not empty.
  memory:
    forceSyncEnable: true # Set true to force sync if memory usage is too high
    forceSyncSegmentNum: 1 # number of segments to sync; segments with the largest buffers will be synced.
    checkInterval: 3000 # the interval to check datanode memory usage, in milliseconds
    forceSyncWatermark: 0.5 # memory watermark for standalone; upon reaching this watermark, segments will be synced.
  timetick:
    byRPC: true
    interval: 500
  channel:
    # specify the size of the global work pool of all channels
    # if this parameter <= 0, it will be set to the maximum number of CPUs that can be executing
    # suggest setting it bigger on large collection numbers to avoid blocking
    workPoolSize: -1
    # specify the size of the global work pool for channel checkpoint updating
    # if this parameter <= 0, it will be set to 10
    updateChannelCheckpointMaxParallel: 10
    updateChannelCheckpointInterval: 60 # the interval duration (in seconds) for the datanode to update the channel checkpoint of each channel
    updateChannelCheckpointRPCTimeout: 20 # timeout in seconds for UpdateChannelCheckpoint RPC call
    maxChannelCheckpointsPerPRC: 128 # The maximum number of channel checkpoints per UpdateChannelCheckpoint RPC.
    channelCheckpointUpdateTickInSeconds: 10 # The frequency, in seconds, at which the channel checkpoint updater executes updates.
  import:
    maxConcurrentTaskNum: 16 # The maximum number of import/pre-import tasks allowed to run concurrently on a datanode.
    maxImportFileSizeInGB: 16 # The maximum file size (in GB) for an import file, where an import file refers to either a Row-Based file or a set of Column-Based files.
    readBufferSizeInMB: 16 # The data block size (in MB) read from the chunk manager by the datanode during import.
  compaction:
    levelZeroBatchMemoryRatio: 0.05 # The minimal memory ratio of free memory for level zero compaction executing in batch mode
  gracefulStopTimeout: 1800 # seconds. force stop node without graceful stop
  ip: # if not specified, use the first unicastable address
  port: 21124
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 268435456
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 536870912

# Configures the system log output.
log:
  level: info # Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
  file:
    rootPath: # root dir path to put logs, default "" means no log file will print. please adjust in embedded Milvus: /tmp/milvus/logs
    maxSize: 300 # MB
    maxAge: 10 # Maximum time for log retention in days.
    maxBackups: 20
  format: text # text or json
  stdout: true # Whether to enable stdout

grpc:
  log:
    level: WARNING
  gracefulStopTimeout: 10 # seconds, time to wait for graceful stop to finish
  client:
    compressionEnabled: false
    dialTimeout: 200
    keepAliveTime: 10000
    keepAliveTimeout: 20000
    maxMaxAttempts: 10
    initialBackoff: 0.2
    maxBackoff: 10
    minResetInterval: 1000
    maxCancelError: 32
    minSessionCheckInterval: 200

# Configure whether proxy tls is enabled.
tls:
  serverPemPath: configs/cert/server.pem
  serverKeyPath: configs/cert/server.key
  caPemPath: configs/cert/ca.pem

common:
  defaultPartitionName: _default # default partition name for a collection
  defaultIndexName: _default_idx # default index name
  entityExpiration: -1 # Entity expiration in seconds, CAUTION -1 means never expire
  indexSliceSize: 16 # MB
  threadCoreCoefficient:
    highPriority: 10 # This parameter specifies how many times the number of threads is the number of cores in the high priority pool
    middlePriority: 5 # This parameter specifies how many times the number of threads is the number of cores in the middle priority pool
    lowPriority: 1 # This parameter specifies how many times the number of threads is the number of cores in the low priority pool
  buildIndexThreadPoolRatio: 0.75
  DiskIndex:
    MaxDegree: 56
    SearchListSize: 100
    PQCodeBudgetGBRatio: 0.125
    BuildNumThreadsRatio: 1
    SearchCacheBudgetGBRatio: 0.1
    LoadNumThreadRatio: 8
    BeamWidthRatio: 4
  gracefulTime: 5000 # milliseconds. it represents the interval (in ms) by which the request arrival time needs to be subtracted in the case of Bounded Consistency.
  gracefulStopTimeout: 1800 # seconds. it will force quit the server if the graceful stop process is not completed during this time.
  storageType: remote # please adjust in embedded Milvus: local, available values are [local, remote, opendal], value minio is deprecated, use remote instead
  # Default value: auto
  # Valid values: [auto, avx512, avx2, avx, sse4_2]
  # This configuration is only used by querynode and indexnode, it selects the CPU instruction set for Searching and Index-building.
  simdType: auto
  security:
    authorizationEnabled: false
    # The superusers will ignore some system check processes,
    # like the old password verification when updating the credential
    superUsers:
    tlsMode: 0
  session:
    ttl: 30 # ttl value when session granting a lease to register service
    retryTimes: 30 # retry times when session sending etcd requests
  locks:
    metrics:
      enable: false # whether to gather statistics for metrics locks
    threshold:
      info: 500 # minimum milliseconds for printing durations at info level
      warn: 1000 # minimum milliseconds for printing durations at warn level
  storage:
    scheme: s3
    enablev2: false
  ttMsgEnabled: true # Whether the instance disables sending ts messages
  traceLogMode: 0 # trace request info
  bloomFilterSize: 100000 # bloom filter initial size
  maxBloomFalsePositive: 0.001 # max false positive rate for bloom filter

# QuotaConfig, configurations of Milvus quota and limits.
# By default, we enable:
# 1. TT protection;
# 2. Memory protection;
# 3. Disk quota protection.
# You can enable:
# 1. DML throughput limitation;
# 2. DDL, DQL qps/rps limitation;
# 3. DQL Queue length/latency protection;
# 4. DQL result rate protection;
# If necessary, you can also manually force to deny RW requests.
quotaAndLimits:
  enabled: true # `true` to enable quota and limits, `false` to disable.
  # quotaCenterCollectInterval is the time interval that quotaCenter
  # collects metrics from Proxies, Query cluster and Data cluster.
  # seconds, (0 ~ 65536)
  quotaCenterCollectInterval: 3
  ddl:
    enabled: false
    collectionRate: -1 # qps, default no limit, rate for CreateCollection, DropCollection, LoadCollection, ReleaseCollection
    partitionRate: -1 # qps, default no limit, rate for CreatePartition, DropPartition, LoadPartition, ReleasePartition
    db:
      collectionRate: -1 # qps of db level, default no limit, rate for CreateCollection, DropCollection, LoadCollection, ReleaseCollection
      partitionRate: -1 # qps of db level, default no limit, rate for CreatePartition, DropPartition, LoadPartition, ReleasePartition
  indexRate:
    enabled: false
    max: -1 # qps, default no limit, rate for CreateIndex, DropIndex
    db:
      max: -1 # qps of db level, default no limit, rate for CreateIndex, DropIndex
  flushRate:
    enabled: true
    max: -1 # qps, default no limit, rate for flush
    collection:
      max: 0.1 # qps, rate for flush at collection level.
    db:
      max: -1 # qps of db level, default no limit, rate for flush
  compactionRate:
    enabled: false
    max: -1 # qps, default no limit, rate for manualCompaction
    db:
      max: -1 # qps of db level, default no limit, rate for manualCompaction
  dml:
    # dml limit rates, default no limit.
    # The maximum rate will not be greater than max.
    enabled: false
    insertRate:
      max: -1 # MB/s, default no limit
      db:
        max: -1 # MB/s, default no limit
      collection:
        max: -1 # MB/s, default no limit
      partition:
        max: -1 # MB/s, default no limit
    upsertRate:
      max: -1 # MB/s, default no limit
      db:
        max: -1 # MB/s, default no limit
      collection:
        max: -1 # MB/s, default no limit
      partition:
        max: -1 # MB/s, default no limit
    deleteRate:
      max: -1 # MB/s, default no limit
      db:
        max: -1 # MB/s, default no limit
      collection:
        max: -1 # MB/s, default no limit
      partition:
        max: -1 # MB/s, default no limit
    bulkLoadRate:
      max: -1 # MB/s, default no limit, not supported yet. TODO: limit bulkLoad rate
      db:
        max: -1 # MB/s, default no limit, not supported yet. TODO: limit db bulkLoad rate
      collection:
        max: -1 # MB/s, default no limit, not supported yet. TODO: limit collection bulkLoad rate
      partition:
        max: -1 # MB/s, default no limit, not supported yet. TODO: limit partition bulkLoad rate
  dql:
    # dql limit rates, default no limit.
    # The maximum rate will not be greater than max.
    enabled: false
    searchRate:
      max: -1 # vps (vectors per second), default no limit
      db:
        max: -1 # vps (vectors per second), default no limit
      collection:
        max: -1 # vps (vectors per second), default no limit
      partition:
        max: -1 # vps (vectors per second), default no limit
    queryRate:
      max: -1 # qps, default no limit
      db:
        max: -1 # qps, default no limit
      collection:
        max: -1 # qps, default no limit
      partition:
        max: -1 # qps, default no limit
  limits:
    maxCollectionNum: 65536
    maxCollectionNumPerDB: 65536
    maxInsertSize: -1 # maximum size of a single insert request, in bytes, -1 means no limit
    maxResourceGroupNumOfQueryNode: 1024 # maximum number of resource groups of query nodes
  limitWriting:
    # forceDeny false means dml requests are allowed (except for some
    # specific conditions, such as node memory reaching the water marker), true means always reject all dml requests.
    forceDeny: false
    ttProtection:
      enabled: false
      # maxTimeTickDelay indicates the backpressure for DML Operations.
      # DML rates would be reduced according to the ratio of time tick delay to maxTimeTickDelay,
      # if time tick delay is greater than maxTimeTickDelay, all DML requests would be rejected.
      # seconds
      maxTimeTickDelay: 300
    memProtection:
      # When memory usage > memoryHighWaterLevel, all dml requests would be rejected;
      # When memoryLowWaterLevel < memory usage < memoryHighWaterLevel, reduce the dml rate;
      # When memory usage < memoryLowWaterLevel, no action.
      enabled: true
      dataNodeMemoryLowWaterLevel: 0.85 # (0, 1], memoryLowWaterLevel in DataNodes
      dataNodeMemoryHighWaterLevel: 0.95 # (0, 1], memoryHighWaterLevel in DataNodes
      queryNodeMemoryLowWaterLevel: 0.85 # (0, 1], memoryLowWaterLevel in QueryNodes
      queryNodeMemoryHighWaterLevel: 0.95 # (0, 1], memoryHighWaterLevel in QueryNodes
    growingSegmentsSizeProtection:
      # No action will be taken if the growing segments size is less than the low watermark.
      # When the growing segments size exceeds the low watermark, the dml rate will be reduced,
      # but the rate will not be lower than minRateRatio * dmlRate.
      enabled: false
      minRateRatio: 0.5
      lowWaterLevel: 0.2
      highWaterLevel: 0.4
    diskProtection:
      enabled: true # When the total file size of object storage is greater than `diskQuota`, all dml requests would be rejected;
      diskQuota: -1 # MB, (0, +inf), default no limit
      diskQuotaPerDB: -1 # MB, (0, +inf), default no limit
      diskQuotaPerCollection: -1 # MB, (0, +inf), default no limit
      diskQuotaPerPartition: -1 # MB, (0, +inf), default no limit
  limitReading:
    # forceDeny false means dql requests are allowed (except for some
    # specific conditions, such as the collection having been dropped), true means always reject all dql requests.
    forceDeny: false
    queueProtection:
      enabled: false
      # nqInQueueThreshold indicates that the system is under backpressure for the Search/Query path.
      # If NQ in any QueryNode's queue is greater than nqInQueueThreshold, search&query rates would gradually cool off
      # until the NQ in queue no longer exceeds nqInQueueThreshold. We think of the NQ of a query request as 1.
      # int, default no limit
      nqInQueueThreshold: -1
      # queueLatencyThreshold indicates that the system is under backpressure for the Search/Query path.
      # If dql latency of queuing is greater than queueLatencyThreshold, search&query rates would gradually cool off
      # until the latency of queuing no longer exceeds queueLatencyThreshold.
      # The latency here refers to the averaged latency over a period of time.
      # milliseconds, default no limit
      queueLatencyThreshold: -1
    resultProtection:
      enabled: false
      # maxReadResultRate indicates that the system is under backpressure for the Search/Query path.
      # If dql result rate is greater than maxReadResultRate, search&query rates would gradually cool off
      # until the read result rate no longer exceeds maxReadResultRate.
      # MB/s, default no limit
      maxReadResultRate: -1
      maxReadResultRatePerDB: -1
      maxReadResultRatePerCollection: -1
    # coolOffSpeed is the speed at which search&query rates cool off.
    # (0, 1]
    coolOffSpeed: 0.9

trace:
  # trace exporter type, default is stdout,
  # optional values: ['noop','stdout', 'jaeger', 'otlp']
  exporter: noop
  # fraction of traceID based sampler,
  # optional values: [0, 1]
  # Fractions >= 1 will always sample. Fractions < 0 are treated as zero.
  sampleFraction: 0
  jaeger:
    url: # when exporter is jaeger should set the jaeger's URL
  otlp:
    endpoint: # example: "127.0.0.1:4318"
    secure: true

# When using GPU indexing, Milvus will utilize a memory pool to avoid frequent memory allocation and deallocation.
# Here, you can set the size of the memory occupied by the memory pool, with the unit being MB.
# Note that there is a possibility of Milvus crashing when the actual memory demand exceeds the value set by maxMemSize.
# If initMemSize and maxMemSize are both set to zero,
# Milvus will automatically initialize half of the available GPU memory,
# and maxMemSize will be set to the whole available GPU memory.
gpu:
  initMemSize: # GPU memory pool initial size
  maxMemSize: # GPU memory pool max size
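> With the stack up, a quick smoke test of the Milvus standalone service can reuse the same health endpoint the compose healthcheck polls; a minimal sketch, assuming the default ports published above:

```bash
# Liveness probe (same endpoint as the compose healthcheck, published on 9091)
curl -f http://${host_ip}:9091/healthz
# Confirm the gRPC/SDK port published by milvus-standalone is reachable
nc -zv ${host_ip} 19530
```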
390
MultimodalQnA/tests/test_compose_milvus_on_xeon.sh
Normal file
@@ -0,0 +1,390 @@
#!/bin/bash
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set -x
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

export image_fn="apple.png"
export video_fn="WeAreGoingOnBullrun.mp4"
export caption_fn="apple.txt"
export pdf_fn="nke-10k-2023.pdf"

function check_service_ready() {
    local container_name="$1"
    local max_retries="$2"
    local log_string="$3"

    for i in $(seq 1 "$max_retries")
    do
        service_logs=$(docker logs "$container_name" 2>&1 | grep "$log_string" || true)
        if [[ -z "$service_logs" ]]; then
            echo "The $container_name service is not ready yet, sleeping 30s..."
            sleep 30s
        else
            echo "$container_name service is ready"
            break
        fi
    done

    if [[ $i -ge $max_retries ]]; then
        echo "WARNING: Max retries reached when waiting for the $container_name service to be ready"
        docker logs "$container_name" >> "${LOG_PATH}/${container_name}_file.log"
    fi
}
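# Hypothetical usage of the helper above: block until milvus-standalone prints
# a readiness line in its logs. The log string is illustrative; match it to
# what the service actually prints on your deployment.
#   check_service_ready "milvus-standalone" 10 "Proxy successfully started"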
|
||||
function build_docker_images() {
    opea_branch=${opea_branch:-"main"}
    # If opea_branch isn't main, replace the git clone branch in the Dockerfiles.
    if [[ "${opea_branch}" != "main" ]]; then
        cd $WORKPATH
        OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
        NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
        find . -type f -name "Dockerfile*" | while read -r file; do
            echo "Processing file: $file"
            sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
        done
    fi

    cd $WORKPATH/docker_image_build
    git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git

    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="multimodalqna multimodalqna-ui embedding-multimodal-bridgetower embedding retriever lvm-llava lvm dataprep whisper"
    docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
    docker images && sleep 1s
}

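# Example (sketch; "v1.2" is a hypothetical branch name): build against a
# non-default GenAIComps branch by exporting opea_branch before running.
#   opea_branch=v1.2 bash test_compose_milvus_on_xeon.sh
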
function setup_env() {
    export host_ip=${ip_address}
    export MM_EMBEDDING_SERVICE_HOST_IP=${host_ip}
    export MM_RETRIEVER_SERVICE_HOST_IP=${host_ip}
    export LVM_SERVICE_HOST_IP=${host_ip}
    export MEGA_SERVICE_HOST_IP=${host_ip}
    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
    export WHISPER_PORT=7066
    export MAX_IMAGES=1
    export WHISPER_MODEL="base"
    export WHISPER_SERVER_ENDPOINT="http://${host_ip}:${WHISPER_PORT}/v1/asr"
    export COLLECTION_NAME="LangChainCollection"
    export MILVUS_HOST=${host_ip}
    export DATAPREP_MMR_PORT=6007
    export DATAPREP_INGEST_SERVICE_ENDPOINT="http://${host_ip}:${DATAPREP_MMR_PORT}/v1/dataprep/ingest"
    export DATAPREP_GEN_TRANSCRIPT_SERVICE_ENDPOINT="http://${host_ip}:${DATAPREP_MMR_PORT}/v1/dataprep/generate_transcripts"
    export DATAPREP_GEN_CAPTION_SERVICE_ENDPOINT="http://${host_ip}:${DATAPREP_MMR_PORT}/v1/dataprep/generate_captions"
    export DATAPREP_GET_FILE_ENDPOINT="http://${host_ip}:${DATAPREP_MMR_PORT}/v1/dataprep/get"
    export DATAPREP_DELETE_FILE_ENDPOINT="http://${host_ip}:${DATAPREP_MMR_PORT}/v1/dataprep/delete"
    export EMM_BRIDGETOWER_PORT=6006
    export BRIDGE_TOWER_EMBEDDING=true
    export EMBEDDING_MODEL_ID="BridgeTower/bridgetower-large-itm-mlm-itc"
    export MMEI_EMBEDDING_ENDPOINT="http://${host_ip}:$EMM_BRIDGETOWER_PORT"
    export MM_EMBEDDING_PORT_MICROSERVICE=6000
    export MILVUS_RETRIEVER_PORT=7000
    export LVM_PORT=9399
    export LLAVA_SERVER_PORT=8399
    export LVM_MODEL_ID="llava-hf/llava-1.5-7b-hf"
    export LVM_ENDPOINT="http://${host_ip}:$LLAVA_SERVER_PORT"
    export MEGA_SERVICE_PORT=8888
    export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:$MEGA_SERVICE_PORT/v1/multimodalqna"
    export UI_PORT=5173
}

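# Quick sanity check (sketch): every endpoint above derives from host_ip, so
# after setup_env the resolved URLs can be inspected from the environment.
#   setup_env && env | grep -E 'DATAPREP|MILVUS|ENDPOINT' | sort
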
function start_services() {
    echo "Starting services..."
    cd $WORKPATH/docker_compose/intel/cpu/xeon
    docker compose -f compose_milvus.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
    sleep 2m
    echo "Services started."
}

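# The fixed 2-minute sleep is a coarse readiness gate; a tighter alternative
# (sketch, relying on the healthcheck defined for milvus-standalone) would be:
#   until [ "$(docker inspect --format '{{.State.Health.Status}}' milvus-standalone)" = "healthy" ]; do
#       sleep 5
#   done
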
function prepare_data() {
    cd $LOG_PATH
    echo "Downloading image, video, and PDF"
    wget https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true -O ${image_fn}
    wget http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/WeAreGoingOnBullrun.mp4 -O ${video_fn}
    wget https://raw.githubusercontent.com/opea-project/GenAIComps/v1.1/comps/retrievers/redis/data/nke-10k-2023.pdf -O ${pdf_fn}
    echo "Writing caption file"
    echo "This is an apple." > ${caption_fn}
    sleep 1m
}

function validate_service() {
    local URL="$1"
    local EXPECTED_RESULT="$2"
    local SERVICE_NAME="$3"
    local DOCKER_NAME="$4"
    local INPUT_DATA="$5"

    if [[ $SERVICE_NAME == *"dataprep-multimodal-milvus-transcript"* ]]; then
        cd $LOG_PATH
        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F "files=@./${video_fn}" -H 'Content-Type: multipart/form-data' "$URL")
    elif [[ $SERVICE_NAME == *"dataprep-multimodal-milvus-caption"* ]]; then
        cd $LOG_PATH
        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F "files=@./${image_fn}" -H 'Content-Type: multipart/form-data' "$URL")
    elif [[ $SERVICE_NAME == *"dataprep-multimodal-milvus-ingest"* ]]; then
        cd $LOG_PATH
        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F "files=@./${image_fn}" -F "files=@./${caption_fn}" -H 'Content-Type: multipart/form-data' "$URL")
    elif [[ $SERVICE_NAME == *"dataprep-multimodal-milvus-pdf"* ]]; then
        cd $LOG_PATH
        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F "files=@./${pdf_fn}" -H 'Content-Type: multipart/form-data' "$URL")
    elif [[ $SERVICE_NAME == *"dataprep_get"* ]]; then
        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -H 'Content-Type: application/json' "$URL")
    elif [[ $SERVICE_NAME == *"dataprep_del"* ]]; then
        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d '{"file_path": "apple.txt"}' -H 'Content-Type: application/json' "$URL")
    else
        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL")
    fi
    HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://')
    RESPONSE_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g')

    docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log

    # check response status
    if [ "$HTTP_STATUS" -ne "200" ]; then
        echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS"
        exit 1
    else
        echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..."
    fi
    # check response body
    if [[ "$RESPONSE_BODY" != *"$EXPECTED_RESULT"* ]]; then
        echo "[ $SERVICE_NAME ] Content does not match the expected result: $RESPONSE_BODY"
        exit 1
    else
        echo "[ $SERVICE_NAME ] Content is as expected."
    fi

    sleep 1s
}

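# Typical invocation (sketch; mirrors the calls below): POST JSON to a service,
# grep the body for an expected marker, and archive the container's logs.
#   validate_service "http://${host_ip}:7000/v1/retrieval" "retrieved_docs" \
#       "retriever-milvus" "retriever-milvus" '{"text":"test","embedding":[...]}'
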
function validate_microservices() {
    # Check if the microservices are running correctly.

    # BridgeTower embedding server
    echo "Validating embedding-multimodal-bridgetower"
    validate_service \
        "http://${host_ip}:${EMM_BRIDGETOWER_PORT}/v1/encode" \
        '"embedding":[' \
        "embedding-multimodal-bridgetower" \
        "embedding-multimodal-bridgetower" \
        '{"text":"This is example"}'

    validate_service \
        "http://${host_ip}:${EMM_BRIDGETOWER_PORT}/v1/encode" \
        '"embedding":[' \
        "embedding-multimodal-bridgetower" \
        "embedding-multimodal-bridgetower" \
        '{"text":"This is example", "img_b64_str": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC"}'

    # embedding microservice
    echo "Validating embedding"
    validate_service \
        "http://${host_ip}:$MM_EMBEDDING_PORT_MICROSERVICE/v1/embeddings" \
        '"embedding":[' \
        "embedding" \
        "embedding" \
        '{"text" : "This is some sample text."}'

    validate_service \
        "http://${host_ip}:$MM_EMBEDDING_PORT_MICROSERVICE/v1/embeddings" \
        '"embedding":[' \
        "embedding" \
        "embedding" \
        '{"text": {"text" : "This is some sample text."}, "image" : {"url": "https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true"}}'

    sleep 1m # give the retriever time to come up; an immediate curl can fail

    # test data prep
    echo "Validating Data Prep with Generating Transcript for Video"
    validate_service \
        "${DATAPREP_GEN_TRANSCRIPT_SERVICE_ENDPOINT}" \
        "Data preparation succeeded" \
        "dataprep-multimodal-milvus-transcript" \
        "dataprep-multimodal-milvus"

    echo "Validating Data Prep with Image & Caption Ingestion"
    validate_service \
        "${DATAPREP_INGEST_SERVICE_ENDPOINT}" \
        "Data preparation succeeded" \
        "dataprep-multimodal-milvus-ingest" \
        "dataprep-multimodal-milvus"

    echo "Validating Data Prep with PDF"
    validate_service \
        "${DATAPREP_INGEST_SERVICE_ENDPOINT}" \
        "Data preparation succeeded" \
        "dataprep-multimodal-milvus-pdf" \
        "dataprep-multimodal-milvus"

    echo "Validating get file returns mp4"
    validate_service \
        "${DATAPREP_GET_FILE_ENDPOINT}" \
        '.mp4' \
        "dataprep_get" \
        "dataprep-multimodal-milvus"

    echo "Validating get file returns png"
    validate_service \
        "${DATAPREP_GET_FILE_ENDPOINT}" \
        '.png' \
        "dataprep_get" \
        "dataprep-multimodal-milvus"

    sleep 1m

    # multimodal retrieval microservice
    echo "Validating retriever-milvus"
    your_embedding=$(python3 -c "import random; embedding = [random.uniform(-1, 1) for _ in range(512)]; print(embedding)")
    validate_service \
        "http://${host_ip}:${MILVUS_RETRIEVER_PORT}/v1/retrieval" \
        "retrieved_docs" \
        "retriever-milvus" \
        "retriever-milvus" \
        "{\"text\":\"test\",\"embedding\":${your_embedding}}"

    echo "Wait for lvm-llava service to be ready"
    check_service_ready "lvm-llava" 10 "Uvicorn running on http://"

    # llava server
    echo "Evaluating lvm-llava"
    validate_service \
        "http://${host_ip}:${LLAVA_SERVER_PORT}/generate" \
        '"text":' \
        "lvm-llava" \
        "lvm-llava" \
        '{"prompt":"Describe the image please.", "img_b64_str": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC"}'

    echo "Evaluating lvm-llava with a list of images"
    validate_service \
        "http://${host_ip}:${LLAVA_SERVER_PORT}/generate" \
        '"text":' \
        "lvm-llava" \
        "lvm-llava" \
        '{"prompt":"Describe the image please.", "img_b64_str": ["iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC","iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mNkYPhfz0AEYBxVSF+FAP5FDvcfRYWgAAAAAElFTkSuQmCC"]}'

    # lvm
    echo "Evaluating lvm"
    validate_service \
        "http://${host_ip}:${LVM_PORT}/v1/lvm" \
        '"text":"' \
        "lvm" \
        "lvm" \
        '{"retrieved_docs": [], "initial_query": "What is this?", "top_n": 1, "metadata": [{"b64_img_str": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC", "transcript_for_inference": "yellow image", "video_id": "8c7461df-b373-4a00-8696-9a2234359fe0", "time_of_frame_ms":"37000000", "source_video":"WeAreGoingOnBullrun_8c7461df-b373-4a00-8696-9a2234359fe0.mp4"}], "chat_template":"The caption of the image is: '\''{context}'\''. {question}"}'

    # data prep requiring lvm
    echo "Validating Data Prep with Generating Caption for Image"
    validate_service \
        "${DATAPREP_GEN_CAPTION_SERVICE_ENDPOINT}" \
        "Data preparation succeeded" \
        "dataprep-multimodal-milvus-caption" \
        "dataprep-multimodal-milvus"

    sleep 3m
}

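# Manual spot-check (sketch; the same random-embedding trick used above): query
# the retriever directly and inspect the JSON that comes back.
#   emb=$(python3 -c "import random; print([random.uniform(-1, 1) for _ in range(512)])")
#   curl -s -X POST "http://${host_ip}:${MILVUS_RETRIEVER_PORT}/v1/retrieval" \
#       -H 'Content-Type: application/json' \
#       -d "{\"text\":\"test\",\"embedding\":${emb}}"
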
function validate_megaservice() {
    # Curl the Mega Service with retrieval
    echo "Validating megaservice with first query"
    validate_service \
        "http://${host_ip}:${MEGA_SERVICE_PORT}/v1/multimodalqna" \
        '"time_of_frame_ms":' \
        "multimodalqna" \
        "multimodalqna-backend-server" \
        '{"messages": "What is the revenue of Nike in 2023?"}'

    echo "Validating megaservice with first audio query"
    validate_service \
        "http://${host_ip}:${MEGA_SERVICE_PORT}/v1/multimodalqna" \
        '"time_of_frame_ms":' \
        "multimodalqna" \
        "multimodalqna-backend-server" \
        '{"messages": [{"role": "user", "content": [{"type": "audio", "audio": "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA"}]}]}'

    echo "Validating megaservice with first query with an image"
    validate_service \
        "http://${host_ip}:${MEGA_SERVICE_PORT}/v1/multimodalqna" \
        '"time_of_frame_ms":' \
        "multimodalqna" \
        "multimodalqna-backend-server" \
        '{"messages": [{"role": "user", "content": [{"type": "text", "text": "Find a similar image"}, {"type": "image_url", "image_url": {"url": "https://www.ilankelman.org/stopsigns/australia.jpg"}}]}]}'

    echo "Validating megaservice with follow-up query"
    validate_service \
        "http://${host_ip}:${MEGA_SERVICE_PORT}/v1/multimodalqna" \
        '"content":"' \
        "multimodalqna" \
        "multimodalqna-backend-server" \
        '{"messages": [{"role": "user", "content": [{"type": "audio", "audio": "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA"}, {"type": "image_url", "image_url": {"url": "https://www.ilankelman.org/stopsigns/australia.jpg"}}]}, {"role": "assistant", "content": "opea project! "}, {"role": "user", "content": [{"type": "text", "text": "goodbye"}]}]}'

    echo "Validating megaservice with multiple text queries"
    validate_service \
        "http://${host_ip}:${MEGA_SERVICE_PORT}/v1/multimodalqna" \
        '"content":"' \
        "multimodalqna" \
        "multimodalqna-backend-server" \
        '{"messages": [{"role": "user", "content": [{"type": "text", "text": "hello, "}]}, {"role": "assistant", "content": "opea project! "}, {"role": "user", "content": [{"type": "text", "text": "goodbye"}]}]}'
}

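# Quick manual probe (sketch; the same request the first validation sends):
#   curl -s -X POST "http://${host_ip}:${MEGA_SERVICE_PORT}/v1/multimodalqna" \
#       -H 'Content-Type: application/json' \
#       -d '{"messages": "What is the revenue of Nike in 2023?"}'
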
function validate_delete() {
    echo "Validating data prep delete files"
    export DATAPREP_DELETE_FILE_ENDPOINT="http://${host_ip}:${DATAPREP_MMR_PORT}/v1/dataprep/delete"
    validate_service \
        "${DATAPREP_DELETE_FILE_ENDPOINT}" \
        '{"status":true}' \
        "dataprep_del" \
        "dataprep-multimodal-milvus"
}

function delete_data() {
    cd $LOG_PATH
    echo "Deleting image, video, PDF, and caption"
    rm -rf ${image_fn}
    rm -rf ${video_fn}
    rm -rf ${pdf_fn}
    rm -rf ${caption_fn}
}

function stop_docker() {
    echo "Stopping docker..."
    cd $WORKPATH/docker_compose/intel/cpu/xeon
    docker compose -f compose_milvus.yaml stop && docker compose -f compose_milvus.yaml rm -f
    echo "Docker stopped."
}

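# Note: stop/rm leaves the bind-mounted Milvus state behind. To fully reset it
# between runs (sketch; assumes the default DOCKER_VOLUME_DIRECTORY of "." and
# the compose directory as the working directory), the data would also need:
#   rm -rf ./volumes/etcd ./volumes/minio ./volumes/milvus
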
function main() {

    setup_env
    stop_docker
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    start_time=$(date +%s)
    start_services
    end_time=$(date +%s)
    duration=$((end_time-start_time))
    echo "Mega service start duration is $duration s" && sleep 1s
    prepare_data

    validate_microservices
    echo "==== microservices validated ===="
    validate_megaservice
    echo "==== megaservice validated ===="
    validate_delete
    echo "==== delete validated ===="

    delete_data
    stop_docker
    echo y | docker system prune

}

main