diff --git a/.github/workflows/docker/compose/animation-compose.yaml b/.github/workflows/docker/compose/animation-compose.yaml
index 957e4273c..32b2a247a 100644
--- a/.github/workflows/docker/compose/animation-compose.yaml
+++ b/.github/workflows/docker/compose/animation-compose.yaml
@@ -5,13 +5,13 @@ services:
   animation:
     build:
-      dockerfile: comps/animation/wav2lip/Dockerfile
+      dockerfile: comps/animation/src/Dockerfile
     image: ${REGISTRY:-opea}/animation:${TAG:-latest}
   wav2lip:
     build:
-      dockerfile: comps/animation/wav2lip/dependency/Dockerfile
+      dockerfile: comps/animation/src/integration/dependency/Dockerfile
     image: ${REGISTRY:-opea}/wav2lip:${TAG:-latest}
   wav2lip-gaudi:
     build:
-      dockerfile: comps/animation/wav2lip/dependency/Dockerfile.intel_hpu
+      dockerfile: comps/animation/src/integration/dependency/Dockerfile.intel_hpu
     image: ${REGISTRY:-opea}/wav2lip-gaudi:${TAG:-latest}
diff --git a/comps/animation/wav2lip/Dockerfile b/comps/animation/src/Dockerfile
similarity index 67%
rename from comps/animation/wav2lip/Dockerfile
rename to comps/animation/src/Dockerfile
index bc1915b6b..260817827 100644
--- a/comps/animation/wav2lip/Dockerfile
+++ b/comps/animation/src/Dockerfile
@@ -15,10 +15,10 @@ ARG ARCH=cpu
 COPY comps /home/user/comps

 RUN pip install --no-cache-dir --upgrade pip && \
-    pip install --no-cache-dir -r /home/user/comps/animation/wav2lip/requirements.txt ;
+    pip install --no-cache-dir -r /home/user/comps/animation/src/requirements.txt ;

 ENV PYTHONPATH=$PYTHONPATH:/home/user

-WORKDIR /home/user/comps/animation/wav2lip
+WORKDIR /home/user/comps/animation/src

-ENTRYPOINT ["python3", "animation.py"]
+ENTRYPOINT ["python3", "opea_animation_microservice.py"]
diff --git a/comps/animation/wav2lip/README.md b/comps/animation/src/README.md
similarity index 67%
rename from comps/animation/wav2lip/README.md
rename to comps/animation/src/README.md
index 3eb5bb477..c3855955b 100644
--- a/comps/animation/wav2lip/README.md
+++ b/comps/animation/src/README.md
@@ -16,19 +16,19 @@ cd GenAIComps
 - Xeon CPU

 ```bash
-docker build -t opea/wav2lip:latest -f comps/animation/wav2lip/dependency/Dockerfile .
+docker build -t opea/wav2lip:latest -f comps/animation/src/integration/dependency/Dockerfile .
 ```

 - Gaudi2 HPU

 ```bash
-docker build -t opea/wav2lip-gaudi:latest -f comps/animation/wav2lip/dependency/Dockerfile.intel_hpu .
+docker build -t opea/wav2lip-gaudi:latest -f comps/animation/src/integration/dependency/Dockerfile.intel_hpu .
 ```

 ### 1.1.2 Animation server image

 ```bash
-docker build -t opea/animation:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/animation/wav2lip/Dockerfile .
+docker build -t opea/animation:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/animation/src/Dockerfile .
 ```

 ## 1.2. Set environment variables
@@ -78,13 +78,13 @@ export FPS=10
 - Xeon CPU

 ```bash
-docker run --privileged -d --name "wav2lip-service" -p 7860:7860 --ipc=host -w /home/user/comps/animation/wav2lip -e PYTHON=/usr/bin/python3.11 -v $(pwd)/comps/animation/wav2lip/assets:/home/user/comps/animation/wav2lip/assets -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT opea/wav2lip:latest
+docker run --privileged -d --name "wav2lip-service" -p 7860:7860 --ipc=host -w /home/user/comps/animation/src -e PYTHON=/usr/bin/python3.11 -v $(pwd)/comps/animation/src/assets:/home/user/comps/animation/src/assets -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT opea/wav2lip:latest
 ```

 - Gaudi2 HPU

 ```bash
-docker run --privileged -d --name "wav2lip-gaudi-service" -p 7860:7860 --runtime=habana --cap-add=sys_nice --ipc=host -w /home/user/comps/animation/wav2lip -v $(pwd)/comps/animation/wav2lip/assets:/home/user/comps/animation/wav2lip/assets -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e PYTHON=/usr/bin/python3.10 -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT opea/wav2lip-gaudi:latest
+docker run --privileged -d --name "wav2lip-gaudi-service" -p 7860:7860 --runtime=habana --cap-add=sys_nice --ipc=host -w /home/user/comps/animation/src -v $(pwd)/comps/animation/src/assets:/home/user/comps/animation/src/assets -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e PYTHON=/usr/bin/python3.10 -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT opea/wav2lip-gaudi:latest
 ```

 ## 2.2 Run Animation Microservice
@@ -101,7 +101,7 @@ Once microservice starts, user can use below script to validate the running micr

 ```bash
 cd GenAIComps
-python3 comps/animation/wav2lip/dependency/check_wav2lip_server.py
+python3 comps/animation/src/integration/dependency/check_wav2lip_server.py
 ```

 ## 3.2 Validate Animation service
@@ -109,20 +109,20 @@ python3 comps/animation/wav2lip/dependency/check_wav2lip_server.py
 ```bash
 cd GenAIComps
 export ip_address=$(hostname -I | awk '{print $1}')
-curl http://${ip_address}:9066/v1/animation -X POST -H "Content-Type: application/json" -d @comps/animation/wav2lip/assets/audio/sample_question.json
+curl http://${ip_address}:9066/v1/animation -X POST -H "Content-Type: application/json" -d @comps/animation/src/assets/audio/sample_question.json
 ```

 or

 ```bash
 cd GenAIComps
-python3 comps/animation/wav2lip/dependency/check_animation_server.py
+python3 comps/animation/src/integration/dependency/check_animation_server.py
 ```

 The expected output will be a message similar to the following:

 ```bash
-{'wav2lip_result': '....../GenAIComps/comps/animation/wav2lip/assets/outputs/result.mp4'}
+{'wav2lip_result': '....../GenAIComps/comps/animation/src/assets/outputs/result.mp4'}
 ```

-Please find "comps/animation/wav2lip/assets/outputs/result.mp4" as a reference generated video.
+Please find "comps/animation/src/assets/outputs/result.mp4" as a reference generated video.
diff --git a/comps/animation/wav2lip/dependency/__init__.py b/comps/animation/src/__init__.py
similarity index 100%
rename from comps/animation/wav2lip/dependency/__init__.py
rename to comps/animation/src/__init__.py
diff --git a/comps/animation/wav2lip/assets/audio/eg3_ref.wav b/comps/animation/src/assets/audio/eg3_ref.wav
similarity index 100%
rename from comps/animation/wav2lip/assets/audio/eg3_ref.wav
rename to comps/animation/src/assets/audio/eg3_ref.wav
diff --git a/comps/animation/wav2lip/assets/audio/sample_question.json b/comps/animation/src/assets/audio/sample_question.json
similarity index 100%
rename from comps/animation/wav2lip/assets/audio/sample_question.json
rename to comps/animation/src/assets/audio/sample_question.json
diff --git a/comps/animation/wav2lip/assets/audio/sample_whoareyou.json b/comps/animation/src/assets/audio/sample_whoareyou.json
similarity index 100%
rename from comps/animation/wav2lip/assets/audio/sample_whoareyou.json
rename to comps/animation/src/assets/audio/sample_whoareyou.json
diff --git a/comps/animation/wav2lip/assets/img/avatar1.jpg b/comps/animation/src/assets/img/avatar1.jpg
similarity index 100%
rename from comps/animation/wav2lip/assets/img/avatar1.jpg
rename to comps/animation/src/assets/img/avatar1.jpg
diff --git a/comps/animation/wav2lip/assets/img/avatar2.jpg b/comps/animation/src/assets/img/avatar2.jpg
similarity index 100%
rename from comps/animation/wav2lip/assets/img/avatar2.jpg
rename to comps/animation/src/assets/img/avatar2.jpg
diff --git a/comps/animation/wav2lip/assets/img/avatar3.png b/comps/animation/src/assets/img/avatar3.png
similarity index 100%
rename from comps/animation/wav2lip/assets/img/avatar3.png
rename to comps/animation/src/assets/img/avatar3.png
diff --git a/comps/animation/wav2lip/assets/img/avatar4.png b/comps/animation/src/assets/img/avatar4.png
similarity index 100%
rename from comps/animation/wav2lip/assets/img/avatar4.png
rename to comps/animation/src/assets/img/avatar4.png
diff --git a/comps/animation/wav2lip/assets/img/avatar5.png b/comps/animation/src/assets/img/avatar5.png
similarity index 100%
rename from comps/animation/wav2lip/assets/img/avatar5.png
rename to comps/animation/src/assets/img/avatar5.png
diff --git a/comps/animation/wav2lip/assets/img/avatar6.png b/comps/animation/src/assets/img/avatar6.png
similarity index 100%
rename from comps/animation/wav2lip/assets/img/avatar6.png
rename to comps/animation/src/assets/img/avatar6.png
diff --git a/comps/animation/wav2lip/assets/img/flowchart.png b/comps/animation/src/assets/img/flowchart.png
similarity index 100%
rename from comps/animation/wav2lip/assets/img/flowchart.png
rename to comps/animation/src/assets/img/flowchart.png
diff --git a/comps/animation/wav2lip/assets/img/gaudi.png b/comps/animation/src/assets/img/gaudi.png
similarity index 100%
rename from comps/animation/wav2lip/assets/img/gaudi.png
rename to comps/animation/src/assets/img/gaudi.png
diff --git a/comps/animation/wav2lip/assets/img/opea_gh_qr.png b/comps/animation/src/assets/img/opea_gh_qr.png
similarity index 100%
rename from comps/animation/wav2lip/assets/img/opea_gh_qr.png
rename to comps/animation/src/assets/img/opea_gh_qr.png
diff --git a/comps/animation/wav2lip/assets/img/opea_qr.png b/comps/animation/src/assets/img/opea_qr.png
similarity index 100%
rename from comps/animation/wav2lip/assets/img/opea_qr.png
rename to comps/animation/src/assets/img/opea_qr.png
diff --git a/comps/animation/wav2lip/assets/img/xeon.jpg b/comps/animation/src/assets/img/xeon.jpg
similarity index 100%
rename from comps/animation/wav2lip/assets/img/xeon.jpg
rename to comps/animation/src/assets/img/xeon.jpg
diff --git a/comps/animation/wav2lip/assets/outputs/results.mp4 b/comps/animation/src/assets/outputs/results.mp4
similarity index 100%
rename from comps/animation/wav2lip/assets/outputs/results.mp4
rename to comps/animation/src/assets/outputs/results.mp4
diff --git a/comps/animation/wav2lip/check_animation_server.py b/comps/animation/src/check_animation_server.py
similarity index 86%
rename from comps/animation/wav2lip/check_animation_server.py
rename to comps/animation/src/check_animation_server.py
index 815271447..b4511006c 100644
--- a/comps/animation/wav2lip/check_animation_server.py
+++ b/comps/animation/src/check_animation_server.py
@@ -11,7 +11,7 @@ endpoint = f"http://{ip_address}:9066/v1/animation"
 outfile = os.environ.get("OUTFILE")

 # Read the JSON file
-with open("comps/animation/wav2lip/assets/audio/sample_question.json", "r") as file:
+with open("comps/animation/src/assets/audio/sample_question.json", "r") as file:
     data = json.load(file)

 response = requests.post(url=endpoint, json=data, headers={"Content-Type": "application/json"}, proxies={"http": None})
diff --git a/comps/animation/wav2lip/docker_run.sh b/comps/animation/src/docker_run.sh
similarity index 100%
rename from comps/animation/wav2lip/docker_run.sh
rename to comps/animation/src/docker_run.sh
diff --git a/comps/animation/src/integration/__init__.py b/comps/animation/src/integration/__init__.py
new file mode 100644
index 000000000..916f3a44b
--- /dev/null
+++ b/comps/animation/src/integration/__init__.py
@@ -0,0 +1,2 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
diff --git a/comps/animation/wav2lip/dependency/Dockerfile b/comps/animation/src/integration/dependency/Dockerfile
similarity index 90%
rename from comps/animation/wav2lip/dependency/Dockerfile
rename to comps/animation/src/integration/dependency/Dockerfile
index 2f1aa1b76..e9c90bccf 100644
--- a/comps/animation/wav2lip/dependency/Dockerfile
+++ b/comps/animation/src/integration/dependency/Dockerfile
@@ -25,11 +25,11 @@ RUN apt-get update -y && apt-get install -y --no-install-recommends --fix-missin
 # Install GenAIComps
 RUN mkdir -p /home/user/comps
 COPY comps /home/user/comps
-COPY comps/animation/wav2lip/dependency/entrypoint.sh /usr/local/bin/entrypoint.sh
+COPY comps/animation/src/integration/dependency/entrypoint.sh /usr/local/bin/entrypoint.sh

 # Install ffmpeg with x264 software codec
-RUN git clone https://github.com/FFmpeg/FFmpeg.git /home/user/comps/animation/wav2lip/FFmpeg
-WORKDIR /home/user/comps/animation/wav2lip/FFmpeg
+RUN git clone https://github.com/FFmpeg/FFmpeg.git /home/user/comps/animation/src/FFmpeg
+WORKDIR /home/user/comps/animation/src/FFmpeg
 RUN ./configure --enable-gpl --enable-libx264 --enable-cross-compile && \
     make -j$(nproc-1) && \
     make install && \
@@ -53,7 +53,7 @@ ENV PYTHONPATH="$PYTHONPATH:/usr/local/lib/python3.11/site-packages/gfpgan"
 WORKDIR /usr/local/lib/python3.11/site-packages

 # Install pip dependencies
-RUN pip install -r /home/user/comps/animation/wav2lip/requirements.txt
+RUN pip install -r /home/user/comps/animation/src/requirements.txt

 # Custom patches
 # Modify the degradations.py file to import rgb_to_grayscale from torchvision.transforms.functional
@@ -66,7 +66,7 @@ RUN sed -i "s/if 'cpu' not in device and 'cuda' not in device:/if 'cpu' not in d
 RUN sed -i 's/hp.sample_rate, hp.n_fft/sr=hp.sample_rate, n_fft=hp.n_fft/' /usr/local/lib/python3.11/site-packages/Wav2Lip/audio.py

 # Set the working directory
-WORKDIR /home/user/comps/animation/wav2lip/
+WORKDIR /home/user/comps/animation/src/

 # Define the command to run when the container starts
 RUN chmod +x /usr/local/bin/entrypoint.sh
diff --git a/comps/animation/wav2lip/dependency/Dockerfile.intel_hpu b/comps/animation/src/integration/dependency/Dockerfile.intel_hpu
similarity index 93%
rename from comps/animation/wav2lip/dependency/Dockerfile.intel_hpu
rename to comps/animation/src/integration/dependency/Dockerfile.intel_hpu
index 218bfc004..fac3a7548 100644
--- a/comps/animation/wav2lip/dependency/Dockerfile.intel_hpu
+++ b/comps/animation/src/integration/dependency/Dockerfile.intel_hpu
@@ -19,7 +19,7 @@ RUN rm -rf /var/lib/apt/lists/*
 # Install GenAIComps
 RUN mkdir -p /home/user/comps
 COPY comps /home/user/comps
-COPY comps/animation/wav2lip/dependency/entrypoint.sh /usr/local/bin/entrypoint.sh
+COPY comps/animation/src/integration/dependency/entrypoint.sh /usr/local/bin/entrypoint.sh

 # Install ffmpeg with x264 software codec
 RUN git clone https://github.com/FFmpeg/FFmpeg.git /home/user/comps/animation/FFmpeg
@@ -47,7 +47,7 @@ ENV PYTHONPATH="$PYTHONPATH:/usr/local/lib/python3.10/dist-packages/gfpgan"
 WORKDIR /usr/local/lib/python3.10/dist-packages

 # Install pip dependencies
-RUN pip install -r /home/user/comps/animation/wav2lip/requirements.txt
+RUN pip install -r /home/user/comps/animation/src/requirements.txt

 # Custom patches
 # Modify the degradations.py file to import rgb_to_grayscale from torchvision.transforms.functional
@@ -60,7 +60,7 @@ RUN sed -i "s/if 'cpu' not in device and 'cuda' not in device:/if 'cpu' not in d
 RUN sed -i 's/hp.sample_rate, hp.n_fft/sr=hp.sample_rate, n_fft=hp.n_fft/' /usr/local/lib/python3.10/dist-packages/Wav2Lip/audio.py

 # Set the working directory
-WORKDIR /home/user/comps/animation/wav2lip
+WORKDIR /home/user/comps/animation/src

 # Define the command to run when the container starts
 RUN chmod +x /usr/local/bin/entrypoint.sh
diff --git a/comps/animation/src/integration/dependency/__init__.py b/comps/animation/src/integration/dependency/__init__.py
new file mode 100644
index 000000000..916f3a44b
--- /dev/null
+++ b/comps/animation/src/integration/dependency/__init__.py
@@ -0,0 +1,2 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
diff --git a/comps/animation/wav2lip/dependency/check_wav2lip_server.py b/comps/animation/src/integration/dependency/check_wav2lip_server.py
similarity index 82%
rename from comps/animation/wav2lip/dependency/check_wav2lip_server.py
rename to comps/animation/src/integration/dependency/check_wav2lip_server.py
index 399f027d9..c8c783838 100644
--- a/comps/animation/wav2lip/dependency/check_wav2lip_server.py
+++ b/comps/animation/src/integration/dependency/check_wav2lip_server.py
@@ -10,7 +10,7 @@ endpoint = "http://localhost:7860/v1/wav2lip"
 outfile = os.environ.get("OUTFILE")

 # Read the JSON file
-with open("comps/animation/wav2lip/assets/audio/sample_whoareyou.json", "r") as file:
+with open("comps/animation/src/assets/audio/sample_whoareyou.json", "r") as file:
     data = json.load(file)

 inputs = {"audio": data["byte_str"], "max_tokens": 64}
diff --git a/comps/animation/wav2lip/dependency/download_ckpts.sh b/comps/animation/src/integration/dependency/download_ckpts.sh
similarity index 100%
rename from comps/animation/wav2lip/dependency/download_ckpts.sh
rename to comps/animation/src/integration/dependency/download_ckpts.sh
diff --git a/comps/animation/wav2lip/dependency/entrypoint.sh b/comps/animation/src/integration/dependency/entrypoint.sh
similarity index 96%
rename from comps/animation/wav2lip/dependency/entrypoint.sh
rename to comps/animation/src/integration/dependency/entrypoint.sh
index 1004b3594..37c8db22e 100644
--- a/comps/animation/wav2lip/dependency/entrypoint.sh
+++ b/comps/animation/src/integration/dependency/entrypoint.sh
@@ -23,7 +23,7 @@ export PT_HPU_LAZY_MODE=0
 export PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES=1

 # Wav2Lip, GFPGAN
-cd /home/user/comps/animation/wav2lip/ || exit
+cd /home/user/comps/animation/src/integration/ || exit
 python3 dependency/wav2lip_server.py \
   --device $DEVICE \
   --port $((WAV2LIP_PORT)) \
diff --git a/comps/animation/wav2lip/dependency/utils.py b/comps/animation/src/integration/dependency/utils.py
similarity index 100%
rename from comps/animation/wav2lip/dependency/utils.py
rename to comps/animation/src/integration/dependency/utils.py
diff --git a/comps/animation/wav2lip/dependency/wav2lip_server.py b/comps/animation/src/integration/dependency/wav2lip_server.py
similarity index 100%
rename from comps/animation/wav2lip/dependency/wav2lip_server.py
rename to comps/animation/src/integration/dependency/wav2lip_server.py
diff --git a/comps/animation/src/integration/opea.py b/comps/animation/src/integration/opea.py
new file mode 100644
index 000000000..16cb2b5d1
--- /dev/null
+++ b/comps/animation/src/integration/opea.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+import json
+import os
+
+import requests
+
+from comps import CustomLogger, OpeaComponent, ServiceType
+
+logger = CustomLogger("opea_animation")
+logflag = os.getenv("LOGFLAG", False)
+
+
+class OpeaAnimation(OpeaComponent):
+    """A specialized animation component derived from OpeaComponent."""
+
+    def __init__(self, name: str, description: str, config: dict = None):
+        super().__init__(name, ServiceType.ANIMATION.name.lower(), description, config)
+        self.base_url = os.getenv("WAV2LIP_ENDPOINT", "http://localhost:7860")
+
+    def invoke(self, input: str):
+        """Invokes the wav2lip service to generate an animated video from the input audio.
+
+        Args:
+            input (Audio Byte Str)
+        """
+        inputs = {"audio": input}
+
+        response = requests.post(url=f"{self.base_url}/v1/wav2lip", data=json.dumps(inputs), proxies={"http": None})
+
+        outfile = response.json()["wav2lip_result"]
+        return outfile
+
+    def check_health(self) -> bool:
+        """Checks the health of the animation service.
+
+        Returns:
+            bool: True if the service is reachable and healthy, False otherwise.
+        """
+        try:
+            response = requests.get(f"{self.base_url}/v1/health")
+            # If status is 200, the service is considered alive
+            if response.status_code == 200:
+                return True
+            else:
+                return False
+        except Exception as e:
+            # Handle connection errors, timeouts, etc.
+ logger.error(f"Health check failed: {e}") + return False diff --git a/comps/animation/wav2lip/animation.py b/comps/animation/src/opea_animation_microservice.py similarity index 63% rename from comps/animation/wav2lip/animation.py rename to comps/animation/src/opea_animation_microservice.py index bacf6b45f..13ea92cbb 100644 --- a/comps/animation/wav2lip/animation.py +++ b/comps/animation/src/opea_animation_microservice.py @@ -8,12 +8,11 @@ import json import os import time -import requests - # GenAIComps -from comps import CustomLogger +from comps import CustomLogger, OpeaComponentController +from comps.animation.src.integration.opea import OpeaAnimation -logger = CustomLogger("animation") +logger = CustomLogger("opea_animation") logflag = os.getenv("LOGFLAG", False) from comps import ( Base64ByteStrDoc, @@ -25,6 +24,23 @@ from comps import ( statistics_dict, ) +# Initialize OpeaComponentController +controller = OpeaComponentController() + +# Register components +try: + # Instantiate Animation component and register it to controller + opea_animation = OpeaAnimation( + name="OpeaAnimation", + description="OPEA Animation Service", + ) + controller.register(opea_animation) + + # Discover and activate a healthy component + controller.discover_and_activate() +except Exception as e: + logger.error(f"Failed to initialize components: {e}") + # Register the microservice @register_microservice( @@ -37,19 +53,11 @@ from comps import ( output_datatype=VideoPath, ) @register_statistics(names=["opea_service@animation"]) -async def animate(audio: Base64ByteStrDoc): +def animate(audio: Base64ByteStrDoc): start = time.time() - byte_str = audio.byte_str - inputs = {"audio": byte_str} + outfile = opea_animation.invoke(audio.byte_str) if logflag: - logger.info(inputs) - - response = requests.post(url=f"{wav2lip_endpoint}/v1/wav2lip", data=json.dumps(inputs), proxies={"http": None}) - - outfile = response.json()["wav2lip_result"] - if logflag: - logger.info(response) logger.info(f"Video generated successfully, check {outfile} for the result.") statistics_dict["opea_service@animation"].append_latency(time.time() - start, None) @@ -57,6 +65,5 @@ async def animate(audio: Base64ByteStrDoc): if __name__ == "__main__": - wav2lip_endpoint = os.getenv("WAV2LIP_ENDPOINT", "http://localhost:7860") logger.info("[animation - router] Animation initialized.") opea_microservices["opea_service@animation"].start() diff --git a/comps/animation/wav2lip/requirements.txt b/comps/animation/src/requirements.txt similarity index 100% rename from comps/animation/wav2lip/requirements.txt rename to comps/animation/src/requirements.txt diff --git a/tests/animation/test_animation_wav2lip.sh b/tests/animation/test_animation_opea.sh old mode 100755 new mode 100644 similarity index 67% rename from tests/animation/test_animation_wav2lip.sh rename to tests/animation/test_animation_opea.sh index ddc0c0cb0..6aad155a7 --- a/tests/animation/test_animation_wav2lip.sh +++ b/tests/animation/test_animation_opea.sh @@ -10,14 +10,14 @@ ip_address=$(hostname -I | awk '{print $1}') function build_docker_images() { cd $WORKPATH echo $(pwd) - docker build -t opea/wav2lip:comps -f comps/animation/wav2lip/dependency/Dockerfile . + docker build -t opea/wav2lip:comps -f comps/animation/src/integration/dependency/Dockerfile . if [ $? -ne 0 ]; then echo "opea/wav2lip built fail" exit 1 else echo "opea/wav2lip built successful" fi - docker build --no-cache -t opea/animation:comps -f comps/animation/wav2lip/Dockerfile . 
+    docker build --no-cache -t opea/animation:comps -f comps/animation/src/Dockerfile .
     if [ $? -ne 0 ]; then
         echo "opea/animation built fail"
         exit 1
@@ -35,22 +35,22 @@ function start_service() {
     export ANIMATION_PORT=9066
     export INFERENCE_MODE='wav2lip+gfpgan'
     export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
-    export FACE="assets/img/avatar1.jpg"
+    export FACE="/home/user/comps/animation/src/assets/img/avatar1.jpg"
     export AUDIO='None'
     export FACESIZE=96
-    export OUTFILE="assets/outputs/result.mp4"
+    export OUTFILE="/home/user/comps/animation/src/assets/outputs/result.mp4"
     export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
     export UPSCALE_FACTOR=1
     export FPS=10

-    docker run -d --name="test-comps-animation-wav2lip" -v $WORKPATH/comps/animation/wav2lip/assets:/home/user/comps/animation/wav2lip/assets -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT -p 7860:7860 --ipc=host opea/wav2lip:comps
-    docker run -d --name="test-comps-animation" -v $WORKPATH/comps/animation/wav2lip/assets:/home/user/comps/animation/wav2lip/assets -e WAV2LIP_ENDPOINT=http://$ip_address:7860 -e http_proxy=$http_proxy -e https_proxy=$https_proxy -p 9066:9066 --ipc=host opea/animation:comps
+    docker run -d --name="test-comps-animation-wav2lip" -v $WORKPATH/comps/animation/src/assets:/home/user/comps/animation/src/assets -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT -p 7860:7860 --ipc=host opea/wav2lip:comps
+    docker run -d --name="test-comps-animation" -v $WORKPATH/comps/animation/src/assets:/home/user/comps/animation/src/assets -e WAV2LIP_ENDPOINT=http://$ip_address:7860 -e http_proxy=$http_proxy -e https_proxy=$https_proxy -p 9066:9066 --ipc=host opea/animation:comps
     sleep 3m
 }

 function validate_microservice() {
     cd $WORKPATH
-    result=$(http_proxy="" curl http://localhost:9066/v1/animation -X POST -H "Content-Type: application/json" -d @comps/animation/wav2lip/assets/audio/sample_question.json)
+    result=$(http_proxy="" curl http://localhost:9066/v1/animation -X POST -H "Content-Type: application/json" -d @comps/animation/src/assets/audio/sample_question.json)
     if [[ $result == *"result.mp4"* ]]; then
         echo "Result correct."
     else
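To smoke-test the refactored integration layer outside the microservice wrapper, a minimal sketch along the lines of `check_wav2lip_server.py` is shown below. It is illustrative only and not part of this patch: it assumes the wav2lip dependency container is already running and reachable via `WAV2LIP_ENDPOINT` (default `http://localhost:7860`), and that the script is executed from the GenAIComps repo root so the sample JSON path resolves.

```python
# Illustrative driver for the new OpeaAnimation component -- not part of this patch.
import json

from comps.animation.src.integration.opea import OpeaAnimation

# The component reads WAV2LIP_ENDPOINT from the environment in __init__.
animation = OpeaAnimation(name="OpeaAnimation", description="OPEA Animation Service")

# check_health() probes {base_url}/v1/health; bail out early if the
# wav2lip dependency container is not up yet.
if not animation.check_health():
    raise RuntimeError("wav2lip service is not reachable")

# The bundled sample JSON carries a base64-encoded audio clip under "byte_str".
with open("comps/animation/src/assets/audio/sample_whoareyou.json", "r") as file:
    data = json.load(file)

# invoke() POSTs the audio to /v1/wav2lip and returns the generated video path.
outfile = animation.invoke(data["byte_str"])
print(f"Video generated at: {outfile}")
```

Gating on `check_health()` before `invoke()` mirrors what `controller.discover_and_activate()` does at microservice startup.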