Refactor Animation based on ERAG (#1079)
Signed-off-by: Yao, Qing <qing.yao@intel.com>
@@ -5,13 +5,13 @@
 services:
   animation:
     build:
-      dockerfile: comps/animation/wav2lip/Dockerfile
+      dockerfile: comps/animation/src/Dockerfile
     image: ${REGISTRY:-opea}/animation:${TAG:-latest}
   wav2lip:
     build:
-      dockerfile: comps/animation/wav2lip/dependency/Dockerfile
+      dockerfile: comps/animation/src/integration/dependency/Dockerfile
     image: ${REGISTRY:-opea}/wav2lip:${TAG:-latest}
   wav2lip-gaudi:
     build:
-      dockerfile: comps/animation/wav2lip/dependency/Dockerfile.intel_hpu
+      dockerfile: comps/animation/src/integration/dependency/Dockerfile.intel_hpu
    image: ${REGISTRY:-opea}/wav2lip-gaudi:${TAG:-latest}
@@ -15,10 +15,10 @@ ARG ARCH=cpu
 COPY comps /home/user/comps

 RUN pip install --no-cache-dir --upgrade pip && \
-    pip install --no-cache-dir -r /home/user/comps/animation/wav2lip/requirements.txt ;
+    pip install --no-cache-dir -r /home/user/comps/animation/src/requirements.txt ;

 ENV PYTHONPATH=$PYTHONPATH:/home/user

-WORKDIR /home/user/comps/animation/wav2lip
+WORKDIR /home/user/comps/animation/src

-ENTRYPOINT ["python3", "animation.py"]
+ENTRYPOINT ["python3", "opea_animation_microservice.py"]
@@ -16,19 +16,19 @@ cd GenAIComps
 - Xeon CPU

 ```bash
-docker build -t opea/wav2lip:latest -f comps/animation/wav2lip/dependency/Dockerfile .
+docker build -t opea/wav2lip:latest -f comps/animation/src/integration/dependency/Dockerfile .
 ```

 - Gaudi2 HPU

 ```bash
-docker build -t opea/wav2lip-gaudi:latest -f comps/animation/wav2lip/dependency/Dockerfile.intel_hpu .
+docker build -t opea/wav2lip-gaudi:latest -f comps/animation/src/integration/dependency/Dockerfile.intel_hpu .
 ```

 ### 1.1.2 Animation server image

 ```bash
-docker build -t opea/animation:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/animation/wav2lip/Dockerfile .
+docker build -t opea/animation:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/animation/src/Dockerfile .
 ```

 ## 1.2. Set environment variables
@@ -78,13 +78,13 @@ export FPS=10
 - Xeon CPU

 ```bash
-docker run --privileged -d --name "wav2lip-service" -p 7860:7860 --ipc=host -w /home/user/comps/animation/wav2lip -e PYTHON=/usr/bin/python3.11 -v $(pwd)/comps/animation/wav2lip/assets:/home/user/comps/animation/wav2lip/assets -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT opea/wav2lip:latest
+docker run --privileged -d --name "wav2lip-service" -p 7860:7860 --ipc=host -w /home/user/comps/animation/src -e PYTHON=/usr/bin/python3.11 -v $(pwd)/comps/animation/src/assets:/home/user/comps/animation/src/assets -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT opea/wav2lip:latest
 ```

 - Gaudi2 HPU

 ```bash
-docker run --privileged -d --name "wav2lip-gaudi-service" -p 7860:7860 --runtime=habana --cap-add=sys_nice --ipc=host -w /home/user/comps/animation/wav2lip -v $(pwd)/comps/animation/wav2lip/assets:/home/user/comps/animation/wav2lip/assets -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e PYTHON=/usr/bin/python3.10 -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT opea/wav2lip-gaudi:latest
+docker run --privileged -d --name "wav2lip-gaudi-service" -p 7860:7860 --runtime=habana --cap-add=sys_nice --ipc=host -w /home/user/comps/animation/src -v $(pwd)/comps/animation/src/assets:/home/user/comps/animation/src/assets -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e PYTHON=/usr/bin/python3.10 -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT opea/wav2lip-gaudi:latest
 ```

 ## 2.2 Run Animation Microservice
@@ -101,7 +101,7 @@ Once the microservice starts, users can use the script below to validate the running microservice:

 ```bash
 cd GenAIComps
-python3 comps/animation/wav2lip/dependency/check_wav2lip_server.py
+python3 comps/animation/src/integration/dependency/check_wav2lip_server.py
 ```

 ## 3.2 Validate Animation service
@@ -109,20 +109,20 @@ python3 comps/animation/wav2lip/dependency/check_wav2lip_server.py
 ```bash
 cd GenAIComps
 export ip_address=$(hostname -I | awk '{print $1}')
-curl http://${ip_address}:9066/v1/animation -X POST -H "Content-Type: application/json" -d @comps/animation/wav2lip/assets/audio/sample_question.json
+curl http://${ip_address}:9066/v1/animation -X POST -H "Content-Type: application/json" -d @comps/animation/src/assets/audio/sample_question.json
 ```

 or

 ```bash
 cd GenAIComps
-python3 comps/animation/wav2lip/dependency/check_animation_server.py
+python3 comps/animation/src/integration/dependency/check_animation_server.py
 ```

 The expected output will be a message similar to the following:

 ```bash
-{'wav2lip_result': '....../GenAIComps/comps/animation/wav2lip/assets/outputs/result.mp4'}
+{'wav2lip_result': '....../GenAIComps/comps/animation/src/assets/outputs/result.mp4'}
 ```

-Please find "comps/animation/wav2lip/assets/outputs/result.mp4" as a reference generated video.
+Please find "comps/animation/src/assets/outputs/result.mp4" as a reference generated video.
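For readers who prefer Python over curl, an equivalent request against the animation endpoint might look like the sketch below. This is illustrative only (not part of the diff); it mirrors what check_animation_server.py does, and assumes the service is up on port 9066 and that it is run from the GenAIComps repo root:

```python
# Sketch: Python equivalent of the curl validation call above.
import json

import requests

# sample_question.json wraps a base64-encoded audio clip in a "byte_str" field
with open("comps/animation/src/assets/audio/sample_question.json", "r") as file:
    data = json.load(file)

response = requests.post(
    url="http://localhost:9066/v1/animation",
    json=data,
    headers={"Content-Type": "application/json"},
    proxies={"http": None},  # bypass any local HTTP proxy, as the repo scripts do
)
print(response.json())  # e.g. {'wav2lip_result': '.../result.mp4'}
```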
[Binary changes: 11 image assets under comps/animation moved to the new src/ layout; dimensions and file sizes unchanged (148 KiB, 158 KiB, 2.5 MiB, 992 KiB, 1.7 MiB, 1.6 MiB, 121 KiB, 47 KiB, 20 KiB, 25 KiB, 22 KiB).]
@@ -11,7 +11,7 @@ endpoint = f"http://{ip_address}:9066/v1/animation"
 outfile = os.environ.get("OUTFILE")

 # Read the JSON file
-with open("comps/animation/wav2lip/assets/audio/sample_question.json", "r") as file:
+with open("comps/animation/src/assets/audio/sample_question.json", "r") as file:
     data = json.load(file)

 response = requests.post(url=endpoint, json=data, headers={"Content-Type": "application/json"}, proxies={"http": None})
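The sample payloads read above are nothing more than base64-encoded audio wrapped in a byte_str field (the field check_wav2lip_server.py reads back, and the field of the microservice's Base64ByteStrDoc input type). Below is a sketch of building such a payload from your own recording; my_audio.wav and my_question.json are hypothetical names:

```python
# Sketch: build a sample_question.json-style payload from a local WAV file.
# "my_audio.wav" / "my_question.json" are placeholder names, not repo files.
import base64
import json

with open("my_audio.wav", "rb") as f:
    byte_str = base64.b64encode(f.read()).decode("utf-8")

with open("my_question.json", "w") as f:
    json.dump({"byte_str": byte_str}, f)
```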
comps/animation/src/integration/__init__.py (new file)
@@ -0,0 +1,2 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
@@ -25,11 +25,11 @@ RUN apt-get update -y && apt-get install -y --no-install-recommends --fix-missing
 # Install GenAIComps
 RUN mkdir -p /home/user/comps
 COPY comps /home/user/comps
-COPY comps/animation/wav2lip/dependency/entrypoint.sh /usr/local/bin/entrypoint.sh
+COPY comps/animation/src/integration/dependency/entrypoint.sh /usr/local/bin/entrypoint.sh

 # Install ffmpeg with x264 software codec
-RUN git clone https://github.com/FFmpeg/FFmpeg.git /home/user/comps/animation/wav2lip/FFmpeg
-WORKDIR /home/user/comps/animation/wav2lip/FFmpeg
+RUN git clone https://github.com/FFmpeg/FFmpeg.git /home/user/comps/animation/src/FFmpeg
+WORKDIR /home/user/comps/animation/src/FFmpeg
 RUN ./configure --enable-gpl --enable-libx264 --enable-cross-compile && \
     make -j$(nproc-1) && \
     make install && \
@@ -53,7 +53,7 @@ ENV PYTHONPATH="$PYTHONPATH:/usr/local/lib/python3.11/site-packages/gfpgan"
 WORKDIR /usr/local/lib/python3.11/site-packages

 # Install pip dependencies
-RUN pip install -r /home/user/comps/animation/wav2lip/requirements.txt
+RUN pip install -r /home/user/comps/animation/src/requirements.txt

 # Custom patches
 # Modify the degradations.py file to import rgb_to_grayscale from torchvision.transforms.functional
@@ -66,7 +66,7 @@ RUN sed -i "s/if 'cpu' not in device and 'cuda' not in device:/if 'cpu' not in d
 RUN sed -i 's/hp.sample_rate, hp.n_fft/sr=hp.sample_rate, n_fft=hp.n_fft/' /usr/local/lib/python3.11/site-packages/Wav2Lip/audio.py

 # Set the working directory
-WORKDIR /home/user/comps/animation/wav2lip/
+WORKDIR /home/user/comps/animation/src/

 # Define the command to run when the container starts
 RUN chmod +x /usr/local/bin/entrypoint.sh
@@ -19,7 +19,7 @@ RUN rm -rf /var/lib/apt/lists/*
 # Install GenAIComps
 RUN mkdir -p /home/user/comps
 COPY comps /home/user/comps
-COPY comps/animation/wav2lip/dependency/entrypoint.sh /usr/local/bin/entrypoint.sh
+COPY comps/animation/src/integration/dependency/entrypoint.sh /usr/local/bin/entrypoint.sh

 # Install ffmpeg with x264 software codec
 RUN git clone https://github.com/FFmpeg/FFmpeg.git /home/user/comps/animation/FFmpeg
@@ -47,7 +47,7 @@ ENV PYTHONPATH="$PYTHONPATH:/usr/local/lib/python3.10/dist-packages/gfpgan"
 WORKDIR /usr/local/lib/python3.10/dist-packages

 # Install pip dependencies
-RUN pip install -r /home/user/comps/animation/wav2lip/requirements.txt
+RUN pip install -r /home/user/comps/animation/src/requirements.txt

 # Custom patches
 # Modify the degradations.py file to import rgb_to_grayscale from torchvision.transforms.functional
@@ -60,7 +60,7 @@ RUN sed -i "s/if 'cpu' not in device and 'cuda' not in device:/if 'cpu' not in d
 RUN sed -i 's/hp.sample_rate, hp.n_fft/sr=hp.sample_rate, n_fft=hp.n_fft/' /usr/local/lib/python3.10/dist-packages/Wav2Lip/audio.py

 # Set the working directory
-WORKDIR /home/user/comps/animation/wav2lip
+WORKDIR /home/user/comps/animation/src

 # Define the command to run when the container starts
 RUN chmod +x /usr/local/bin/entrypoint.sh
comps/animation/src/integration/dependency/__init__.py (new file)
@@ -0,0 +1,2 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
@@ -10,7 +10,7 @@ endpoint = "http://localhost:7860/v1/wav2lip"
 outfile = os.environ.get("OUTFILE")

 # Read the JSON file
-with open("comps/animation/wav2lip/assets/audio/sample_whoareyou.json", "r") as file:
+with open("comps/animation/src/assets/audio/sample_whoareyou.json", "r") as file:
     data = json.load(file)

 inputs = {"audio": data["byte_str"], "max_tokens": 64}
@@ -23,7 +23,7 @@ export PT_HPU_LAZY_MODE=0
 export PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES=1

 # Wav2Lip, GFPGAN
-cd /home/user/comps/animation/wav2lip/ || exit
+cd /home/user/comps/animation/src/integration/ || exit
 python3 dependency/wav2lip_server.py \
     --device $DEVICE \
     --port $((WAV2LIP_PORT)) \
comps/animation/src/integration/opea.py (new file)
@@ -0,0 +1,50 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import json
+import os
+
+import requests
+
+from comps import CustomLogger, OpeaComponent, ServiceType
+
+logger = CustomLogger("opea_animation")
+logflag = os.getenv("LOGFLAG", False)
+
+
+class OpeaAnimation(OpeaComponent):
+    """A specialized animation component derived from OpeaComponent."""
+
+    def __init__(self, name: str, description: str, config: dict = None):
+        super().__init__(name, ServiceType.ANIMATION.name.lower(), description, config)
+        self.base_url = os.getenv("WAV2LIP_ENDPOINT", "http://localhost:7860")
+
+    def invoke(self, input: str):
+        """Invokes the wav2lip service to generate a lip-synced video for the animation input.
+
+        Args:
+            input (str): base64-encoded audio byte string.
+        """
+        inputs = {"audio": input}
+
+        response = requests.post(url=f"{self.base_url}/v1/wav2lip", data=json.dumps(inputs), proxies={"http": None})
+
+        outfile = response.json()["wav2lip_result"]
+        return outfile
+
+    def check_health(self) -> bool:
+        """Checks the health of the animation service.
+
+        Returns:
+            bool: True if the service is reachable and healthy, False otherwise.
+        """
+        try:
+            response = requests.get(f"{self.base_url}/v1/health")
+            # If status is 200, the service is considered alive
+            if response.status_code == 200:
+                return True
+            else:
+                return False
+        except Exception as e:
+            # Handle connection errors, timeouts, etc.
+            logger.error(f"Health check failed: {e}")
+            return False
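For orientation, here is a minimal sketch of how this component could be exercised on its own, outside the microservice. It is not part of the PR; it assumes a wav2lip server is already reachable at WAV2LIP_ENDPOINT and that it is run from the GenAIComps repo root:

```python
# Sketch: standalone use of the OpeaAnimation component (not in this PR).
import json

from comps.animation.src.integration.opea import OpeaAnimation

animation = OpeaAnimation(name="OpeaAnimation", description="OPEA Animation Service")

if animation.check_health():
    # reuse the repo's sample payload: base64 audio under "byte_str"
    with open("comps/animation/src/assets/audio/sample_question.json", "r") as f:
        byte_str = json.load(f)["byte_str"]
    print(animation.invoke(byte_str))  # path to the generated result.mp4
```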
@@ -8,12 +8,11 @@ import json
 import os
 import time

-import requests
-
 # GenAIComps
-from comps import CustomLogger
+from comps import CustomLogger, OpeaComponentController
+from comps.animation.src.integration.opea import OpeaAnimation

-logger = CustomLogger("animation")
+logger = CustomLogger("opea_animation")
 logflag = os.getenv("LOGFLAG", False)
 from comps import (
     Base64ByteStrDoc,
@@ -25,6 +24,23 @@ from comps import (
     statistics_dict,
 )

+# Initialize OpeaComponentController
+controller = OpeaComponentController()
+
+# Register components
+try:
+    # Instantiate Animation component and register it to controller
+    opea_animation = OpeaAnimation(
+        name="OpeaAnimation",
+        description="OPEA Animation Service",
+    )
+    controller.register(opea_animation)
+
+    # Discover and activate a healthy component
+    controller.discover_and_activate()
+except Exception as e:
+    logger.error(f"Failed to initialize components: {e}")
+

 # Register the microservice
 @register_microservice(
@@ -37,19 +53,11 @@ from comps import (
     output_datatype=VideoPath,
 )
 @register_statistics(names=["opea_service@animation"])
-async def animate(audio: Base64ByteStrDoc):
+def animate(audio: Base64ByteStrDoc):
     start = time.time()

-    byte_str = audio.byte_str
-    inputs = {"audio": byte_str}
+    outfile = opea_animation.invoke(audio.byte_str)
     if logflag:
-        logger.info(inputs)
-
-    response = requests.post(url=f"{wav2lip_endpoint}/v1/wav2lip", data=json.dumps(inputs), proxies={"http": None})
-
-    outfile = response.json()["wav2lip_result"]
-    if logflag:
-        logger.info(response)
         logger.info(f"Video generated successfully, check {outfile} for the result.")

     statistics_dict["opea_service@animation"].append_latency(time.time() - start, None)
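The net effect of the refactor above: animate() no longer builds the HTTP request itself but delegates to the active OpeaAnimation component, so the wav2lip endpoint is resolved by the component (WAV2LIP_ENDPOINT, see opea.py above) rather than in the __main__ block, and alternative animation backends can be introduced by registering additional components with the controller, with check_health() gating activation.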
@@ -57,6 +65,5 @@ async def animate(audio: Base64ByteStrDoc):


 if __name__ == "__main__":
-    wav2lip_endpoint = os.getenv("WAV2LIP_ENDPOINT", "http://localhost:7860")
     logger.info("[animation - router] Animation initialized.")
     opea_microservices["opea_service@animation"].start()
tests/animation/test_animation_wav2lip.sh → tests/animation/test_animation_opea.sh (renamed; executable file → normal file)
@@ -10,14 +10,14 @@ ip_address=$(hostname -I | awk '{print $1}')
 function build_docker_images() {
   cd $WORKPATH
   echo $(pwd)
-  docker build -t opea/wav2lip:comps -f comps/animation/wav2lip/dependency/Dockerfile .
+  docker build -t opea/wav2lip:comps -f comps/animation/src/integration/dependency/Dockerfile .
   if [ $? -ne 0 ]; then
     echo "opea/wav2lip built fail"
     exit 1
   else
     echo "opea/wav2lip built successful"
   fi
-  docker build --no-cache -t opea/animation:comps -f comps/animation/wav2lip/Dockerfile .
+  docker build --no-cache -t opea/animation:comps -f comps/animation/src/Dockerfile .
   if [ $? -ne 0 ]; then
     echo "opea/animation built fail"
     exit 1
@@ -35,22 +35,22 @@ function start_service() {
   export ANIMATION_PORT=9066
   export INFERENCE_MODE='wav2lip+gfpgan'
   export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
-  export FACE="assets/img/avatar1.jpg"
+  export FACE="/home/user/comps/animation/src/assets/img/avatar1.jpg"
   export AUDIO='None'
   export FACESIZE=96
-  export OUTFILE="assets/outputs/result.mp4"
+  export OUTFILE="/home/user/comps/animation/src/assets/outputs/result.mp4"
   export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
   export UPSCALE_FACTOR=1
   export FPS=10

-  docker run -d --name="test-comps-animation-wav2lip" -v $WORKPATH/comps/animation/wav2lip/assets:/home/user/comps/animation/wav2lip/assets -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT -p 7860:7860 --ipc=host opea/wav2lip:comps
-  docker run -d --name="test-comps-animation" -v $WORKPATH/comps/animation/wav2lip/assets:/home/user/comps/animation/wav2lip/assets -e WAV2LIP_ENDPOINT=http://$ip_address:7860 -e http_proxy=$http_proxy -e https_proxy=$https_proxy -p 9066:9066 --ipc=host opea/animation:comps
+  docker run -d --name="test-comps-animation-wav2lip" -v $WORKPATH/comps/animation/src/assets:/home/user/comps/animation/src/assets -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT -p 7860:7860 --ipc=host opea/wav2lip:comps
+  docker run -d --name="test-comps-animation" -v $WORKPATH/comps/animation/src/assets:/home/user/comps/animation/src/assets -e WAV2LIP_ENDPOINT=http://$ip_address:7860 -e http_proxy=$http_proxy -e https_proxy=$https_proxy -p 9066:9066 --ipc=host opea/animation:comps
   sleep 3m
 }

 function validate_microservice() {
   cd $WORKPATH
-  result=$(http_proxy="" curl http://localhost:9066/v1/animation -X POST -H "Content-Type: application/json" -d @comps/animation/wav2lip/assets/audio/sample_question.json)
+  result=$(http_proxy="" curl http://localhost:9066/v1/animation -X POST -H "Content-Type: application/json" -d @comps/animation/src/assets/audio/sample_question.json)
   if [[ $result == *"result.mp4"* ]]; then
     echo "Result correct."
   else