# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

services:
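  # Whisper ASR (speech-to-text) microservice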
  whisper-service:
    image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
    container_name: whisper-service
    ports:
      - "7066:7066"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
    restart: unless-stopped
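  # SpeechT5 TTS (text-to-speech) microservice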
  speecht5-service:
    image: ${REGISTRY:-opea}/speecht5:${TAG:-latest}
    container_name: speecht5-service
    ports:
      - "7055:7055"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
    restart: unless-stopped
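  # Hugging Face Text Generation Inference (LLM serving) on CPU; host port 3006 maps to container port 80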
  tgi-service:
    image: ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu
    container_name: tgi-service
    ports:
      - "3006:80"
    volumes:
      - "./data:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://${host_ip}:3006/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 100
    command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0
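  # Wav2Lip lip-sync service; writes generated video into the mounted output directory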
  wav2lip-service:
    image: ${REGISTRY:-opea}/wav2lip:${TAG:-latest}
    container_name: wav2lip-service
    ports:
      - "7860:7860"
    ipc: host
    volumes:
      - ${PWD}:/outputs
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      DEVICE: ${DEVICE}
      INFERENCE_MODE: ${INFERENCE_MODE}
      CHECKPOINT_PATH: ${CHECKPOINT_PATH}
      FACE: ${FACE}
      AUDIO: ${AUDIO}
      FACESIZE: ${FACESIZE}
      OUTFILE: ${OUTFILE}
      GFPGAN_MODEL_VERSION: ${GFPGAN_MODEL_VERSION}
      UPSCALE_FACTOR: ${UPSCALE_FACTOR}
      FPS: ${FPS}
      WAV2LIP_PORT: ${WAV2LIP_PORT}
    restart: unless-stopped
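  # Animation microservice that calls the Wav2Lip endpoint; host port 3008 maps to container port 9066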
  animation:
    image: ${REGISTRY:-opea}/animation:${TAG:-latest}
    container_name: animation-server
    ports:
      - "3008:9066"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      WAV2LIP_ENDPOINT: ${WAV2LIP_ENDPOINT}
    restart: unless-stopped
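  # AvatarChatbot megaservice backend (Xeon): orchestrates the Whisper, LLM, SpeechT5, and animation services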
  avatarchatbot-xeon-backend-server:
    image: ${REGISTRY:-opea}/avatarchatbot:${TAG:-latest}
    container_name: avatarchatbot-xeon-backend-server
    depends_on:
      - animation
    ports:
      - "3009:8888"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
      - MEGA_SERVICE_PORT=${MEGA_SERVICE_PORT}
      - WHISPER_SERVER_HOST_IP=${WHISPER_SERVER_HOST_IP}
      - WHISPER_SERVER_PORT=${WHISPER_SERVER_PORT}
      - LLM_SERVER_HOST_IP=${LLM_SERVER_HOST_IP}
      - LLM_SERVER_PORT=${LLM_SERVER_PORT}
      - SPEECHT5_SERVER_HOST_IP=${SPEECHT5_SERVER_HOST_IP}
      - SPEECHT5_SERVER_PORT=${SPEECHT5_SERVER_PORT}
      - ANIMATION_SERVICE_HOST_IP=${ANIMATION_SERVICE_HOST_IP}
      - ANIMATION_SERVICE_PORT=${ANIMATION_SERVICE_PORT}
    ipc: host
    restart: always
networks:
  default:
    driver: bridge