Refactor image2image (#1076)
* Refactor image2image

Signed-off-by: Yao, Qing <qing.yao@intel.com>

---------

Signed-off-by: Yao, Qing <qing.yao@intel.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
@@ -5,9 +5,9 @@
 services:
   image2image:
     build:
-      dockerfile: comps/image2image/Dockerfile
+      dockerfile: comps/image2image/src/Dockerfile
     image: ${REGISTRY:-opea}/image2image:${TAG:-latest}
   image2image-gaudi:
     build:
-      dockerfile: comps/image2image/Dockerfile.intel_hpu
+      dockerfile: comps/image2image/src/Dockerfile.intel_hpu
     image: ${REGISTRY:-opea}/image2image-gaudi:${TAG:-latest}
@@ -1,117 +0,0 @@
-# Copyright (C) 2024 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-import argparse
-import base64
-import os
-import threading
-import time
-
-import torch
-from diffusers import AutoPipelineForImage2Image
-from diffusers.utils import load_image
-
-from comps import (
-    CustomLogger,
-    SDImg2ImgInputs,
-    SDOutputs,
-    ServiceType,
-    opea_microservices,
-    register_microservice,
-    register_statistics,
-    statistics_dict,
-)
-
-logger = CustomLogger("image2image")
-pipe = None
-args = None
-initialization_lock = threading.Lock()
-initialized = False
-
-
-def initialize():
-    global pipe, args, initialized
-    with initialization_lock:
-        if not initialized:
-            # initialize model and tokenizer
-            if os.getenv("MODEL", None):
-                args.model_name_or_path = os.getenv("MODEL")
-            kwargs = {}
-            if args.bf16:
-                kwargs["torch_dtype"] = torch.bfloat16
-            if not args.token:
-                args.token = os.getenv("HF_TOKEN")
-            if args.device == "hpu":
-                kwargs.update(
-                    {
-                        "use_habana": True,
-                        "use_hpu_graphs": args.use_hpu_graphs,
-                        "gaudi_config": "Habana/stable-diffusion",
-                        "token": args.token,
-                    }
-                )
-                if "stable-diffusion-xl" in args.model_name_or_path:
-                    from optimum.habana.diffusers import GaudiStableDiffusionXLImg2ImgPipeline
-
-                    pipe = GaudiStableDiffusionXLImg2ImgPipeline.from_pretrained(
-                        args.model_name_or_path,
-                        **kwargs,
-                    )
-                else:
-                    raise NotImplementedError(
-                        "Only support stable-diffusion-xl now, " + f"model {args.model_name_or_path} not supported."
-                    )
-            elif args.device == "cpu":
-                pipe = AutoPipelineForImage2Image.from_pretrained(args.model_name_or_path, token=args.token, **kwargs)
-            else:
-                raise NotImplementedError(f"Only support cpu and hpu device now, device {args.device} not supported.")
-            logger.info("Stable Diffusion model initialized.")
-            initialized = True
-
-
-@register_microservice(
-    name="opea_service@image2image",
-    service_type=ServiceType.IMAGE2IMAGE,
-    endpoint="/v1/image2image",
-    host="0.0.0.0",
-    port=9389,
-    input_datatype=SDImg2ImgInputs,
-    output_datatype=SDOutputs,
-)
-@register_statistics(names=["opea_service@image2image"])
-def image2image(input: SDImg2ImgInputs):
-    initialize()
-    start = time.time()
-    image = load_image(input.image).convert("RGB")
-    prompt = input.prompt
-    num_images_per_prompt = input.num_images_per_prompt
-
-    generator = torch.manual_seed(args.seed)
-    images = pipe(image=image, prompt=prompt, generator=generator, num_images_per_prompt=num_images_per_prompt).images
-    image_path = os.path.join(os.getcwd(), prompt.strip().replace(" ", "_").replace("/", ""))
-    os.makedirs(image_path, exist_ok=True)
-    results = []
-    for i, image in enumerate(images):
-        save_path = os.path.join(image_path, f"image_{i+1}.png")
-        image.save(save_path)
-        with open(save_path, "rb") as f:
-            bytes = f.read()
-            b64_str = base64.b64encode(bytes).decode()
-            results.append(b64_str)
-    statistics_dict["opea_service@image2image"].append_latency(time.time() - start, None)
-    return SDOutputs(images=results)
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--model_name_or_path", type=str, default="stabilityai/stable-diffusion-xl-refiner-1.0")
-    parser.add_argument("--use_hpu_graphs", default=False, action="store_true")
-    parser.add_argument("--device", type=str, default="cpu")
-    parser.add_argument("--token", type=str, default=None)
-    parser.add_argument("--seed", type=int, default=42)
-    parser.add_argument("--bf16", action="store_true")
-
-    args = parser.parse_args()
-
-    logger.info("Image2image server started.")
-    opea_microservices["opea_service@image2image"].start()
@@ -12,12 +12,12 @@ COPY comps /home/comps

 RUN pip install --no-cache-dir --upgrade pip && \
     if [ ${ARCH} = "cpu" ]; then pip install torch torchvision --index-url https://download.pytorch.org/whl/cpu; fi && \
-    pip install --no-cache-dir -r /home/comps/image2image/requirements.txt
+    pip install --no-cache-dir -r /home/comps/image2image/src/requirements.txt

 ENV PYTHONPATH=$PYTHONPATH:/home

-WORKDIR /home/comps/image2image
+WORKDIR /home/comps/image2image/src

-RUN echo python image2image.py --bf16 >> run.sh
+RUN echo python opea_image2image_microservice.py --bf16 >> run.sh

 CMD bash run.sh
@@ -19,11 +19,11 @@ ENV PYTHONPATH=/home/user:/usr/lib/habanalabs/:/home/user/optimum-habana

 # Install requirements and optimum habana
 RUN pip install --no-cache-dir --upgrade pip && \
-    pip install --no-cache-dir -r /home/user/comps/image2image/requirements.txt && \
+    pip install --no-cache-dir -r /home/user/comps/image2image/src/requirements.txt && \
     pip install --no-cache-dir optimum[habana]

-WORKDIR /home/user/comps/image2image
+WORKDIR /home/user/comps/image2image/src

-RUN echo python image2image.py --device hpu --use_hpu_graphs --bf16 >> run.sh
+RUN echo python opea_image2image_microservice.py --device hpu --use_hpu_graphs --bf16 >> run.sh

 CMD bash run.sh
@@ -28,7 +28,7 @@ export HF_TOKEN=<your huggingface token>
 Start the OPEA Microservice:

 ```bash
-python image2image.py --bf16 --model_name_or_path $MODEL --token $HF_TOKEN
+python opea_image2image_microservice.py --bf16 --model_name_or_path $MODEL --token $HF_TOKEN
 ```

 # 🚀2. Start Microservice with Docker (Option 2)
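Once the renamed entrypoint is running, a quick smoke test can confirm the service still answers on its registered route. This is a hedged sketch: the payload field names come from `SDImg2ImgInputs` in the diff, port 9389 and `/v1/image2image` come from the `@register_microservice` decorator, and the sample image URL (a common diffusers img2img example) is only an illustration.

```python
# Hedged smoke test for the refactored service; adjust host, port, and image as needed.
import requests

payload = {
    "image": "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg",
    "prompt": "a fantasy landscape, trending on artstation",
    "num_images_per_prompt": 1,
}
response = requests.post("http://localhost:9389/v1/image2image", json=payload, timeout=600)
response.raise_for_status()
# The service returns SDOutputs, i.e. {"images": [<base64 PNG>, ...]}
print(f"received {len(response.json()['images'])} base64-encoded image(s)")
```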
@@ -48,7 +48,7 @@ Build image-to-image service image on Xeon with below command:

 ```bash
 cd ../..
-docker build -t opea/image2image:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/image2image/Dockerfile .
+docker build -t opea/image2image:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/image2image/src/Dockerfile .
 ```

 ### 2.1.2 Image-to-Image Service Image on Gaudi
||||
### 2.1.2 Image-to-Image Service Image on Gaudi
|
||||
@@ -57,7 +57,7 @@ Build image-to-image service image on Gaudi with below command:
|
||||
|
||||
```bash
|
||||
cd ../..
|
||||
docker build -t opea/image2image-gaudi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/image2image/Dockerfile.intel_hpu .
|
||||
docker build -t opea/image2image-gaudi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/image2image/src/Dockerfile.intel_hpu .
|
||||
```
|
||||
|
||||
## 2.2 Start Image-to-Image Service
|
||||
comps/image2image/src/integration/__init__.py (new file, 2 lines)
@@ -0,0 +1,2 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
comps/image2image/src/integration/opea_image2image_native.py (new file, 139 lines)
@@ -0,0 +1,139 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+import base64
+import os
+import threading
+
+from comps import CustomLogger, OpeaComponent, SDImg2ImgInputs, ServiceType
+
+logger = CustomLogger("opea_imagetoimage")
+logflag = os.getenv("LOGFLAG", False)
+
+import torch
+from diffusers import AutoPipelineForImage2Image
+from diffusers.utils import load_image
+
+pipe = None
+args = None
+initialization_lock = threading.Lock()
+initialized = False
+
+
+def initialize(
+    model_name_or_path="stabilityai/stable-diffusion-xl-refiner-1.0",
+    device="cpu",
+    token=None,
+    bf16=True,
+    use_hpu_graphs=False,
+):
+    global pipe, args, initialized
+    with initialization_lock:
+        if not initialized:
+            # initialize model and tokenizer
+            if os.getenv("MODEL", None):
+                model_name_or_path = os.getenv("MODEL")
+            kwargs = {}
+            if bf16:
+                kwargs["torch_dtype"] = torch.bfloat16
+            if not token:
+                token = os.getenv("HF_TOKEN")
+            if device == "hpu":
+                kwargs.update(
+                    {
+                        "use_habana": True,
+                        "use_hpu_graphs": use_hpu_graphs,
+                        "gaudi_config": "Habana/stable-diffusion",
+                        "token": token,
+                    }
+                )
+                if "stable-diffusion-xl" in model_name_or_path:
+                    from optimum.habana.diffusers import GaudiStableDiffusionXLImg2ImgPipeline
+
+                    pipe = GaudiStableDiffusionXLImg2ImgPipeline.from_pretrained(
+                        model_name_or_path,
+                        **kwargs,
+                    )
+                else:
+                    raise NotImplementedError(
+                        "Only support stable-diffusion-xl now, " + f"model {model_name_or_path} not supported."
+                    )
+            elif device == "cpu":
+                pipe = AutoPipelineForImage2Image.from_pretrained(model_name_or_path, token=token, **kwargs)
+            else:
+                raise NotImplementedError(f"Only support cpu and hpu device now, device {device} not supported.")
+            logger.info("Stable Diffusion model initialized.")
+            initialized = True
+
+
+class OpeaImageToImage(OpeaComponent):
+    """A specialized image-to-image component derived from OpeaComponent for the Stable Diffusion model.
+
+    Attributes:
+        model_name_or_path (str): The name of the Stable Diffusion model used.
+        device (str): Which device to use.
+        token (str): Hugging Face token.
+        bf16 (bool): Whether to load the model in bf16 precision.
+        use_hpu_graphs (bool): Whether to use HPU graphs.
+    """
+
+    def __init__(
+        self,
+        name: str,
+        description: str,
+        config: dict = None,
+        seed=42,
+        model_name_or_path="stabilityai/stable-diffusion-xl-refiner-1.0",
+        device="cpu",
+        token=None,
+        bf16=True,
+        use_hpu_graphs=False,
+    ):
+        super().__init__(name, ServiceType.IMAGE2IMAGE.name.lower(), description, config)
+        initialize(
+            model_name_or_path=model_name_or_path, device=device, token=token, bf16=bf16, use_hpu_graphs=use_hpu_graphs
+        )
+        self.pipe = pipe
+        self.seed = seed
+
+    def invoke(self, input: SDImg2ImgInputs):
+        """Invokes the image-to-image service to generate images for the provided input.
+
+        Args:
+            input (SDImg2ImgInputs): The input in SD images format.
+        """
+        image = load_image(input.image).convert("RGB")
+        prompt = input.prompt
+        num_images_per_prompt = input.num_images_per_prompt
+
+        generator = torch.manual_seed(self.seed)
+        images = pipe(
+            image=image, prompt=prompt, generator=generator, num_images_per_prompt=num_images_per_prompt
+        ).images
+        image_path = os.path.join(os.getcwd(), prompt.strip().replace(" ", "_").replace("/", ""))
+        os.makedirs(image_path, exist_ok=True)
+        results = []
+        for i, image in enumerate(images):
+            save_path = os.path.join(image_path, f"image_{i + 1}.png")
+            image.save(save_path)
+            with open(save_path, "rb") as f:
+                bytes = f.read()
+                b64_str = base64.b64encode(bytes).decode()
+                results.append(b64_str)
+
+        return results
+
+    def check_health(self) -> bool:
+        """Checks the health of the image-to-image service.
+
+        Returns:
+            bool: True if the service is reachable and healthy, False otherwise.
+        """
+        try:
+            if self.pipe:
+                return True
+            else:
+                return False
+        except Exception as e:
+            # Handle connection errors, timeouts, etc.
+            logger.error(f"Health check failed: {e}")
+            return False
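The new component can also be driven directly, without the microservice wrapper. A minimal usage sketch, assuming the `comps` package is importable and the default SDXL refiner weights can be downloaded; the image URL is illustrative:

```python
# Hedged usage sketch: exercise OpeaImageToImage directly (no HTTP server involved).
from comps import SDImg2ImgInputs
from comps.image2image.src.integration.opea_image2image_native import OpeaImageToImage

component = OpeaImageToImage(
    name="OpeaImageToImage",
    description="OPEA Image To Image Service",
    device="cpu",
    bf16=False,  # the component defaults to bf16=True; disabled here for a plain CPU run
)
inputs = SDImg2ImgInputs(
    image="https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg",
    prompt="a fantasy landscape, trending on artstation",
    num_images_per_prompt=1,
)
b64_images = component.invoke(inputs)  # list of base64-encoded PNG strings
print(f"generated {len(b64_images)} image(s)")
```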
comps/image2image/src/opea_image2image_microservice.py (new file, 81 lines)
@@ -0,0 +1,81 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import argparse
+import base64
+import os
+import time
+
+from comps import (
+    CustomLogger,
+    OpeaComponentController,
+    SDImg2ImgInputs,
+    SDOutputs,
+    ServiceType,
+    opea_microservices,
+    register_microservice,
+    register_statistics,
+    statistics_dict,
+)
+from comps.image2image.src.integration.opea_image2image_native import OpeaImageToImage
+
+args = None
+
+logger = CustomLogger("image2image")
+
+
+# Initialize OpeaComponentController
+controller = OpeaComponentController()
+
+# Register components
+# try:
+
+# except Exception as e:
+#     logger.error(f"Failed to initialize components: {e}")
+
+
+@register_microservice(
+    name="opea_service@image2image",
+    service_type=ServiceType.IMAGE2IMAGE,
+    endpoint="/v1/image2image",
+    host="0.0.0.0",
+    port=9389,
+    input_datatype=SDImg2ImgInputs,
+    output_datatype=SDOutputs,
+)
+@register_statistics(names=["opea_service@image2image"])
+def image2image(input: SDImg2ImgInputs):
+    start = time.time()
+    results = controller.invoke(input)
+    statistics_dict["opea_service@image2image"].append_latency(time.time() - start, None)
+    return SDOutputs(images=results)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--model_name_or_path", type=str, default="stabilityai/stable-diffusion-xl-refiner-1.0")
+    parser.add_argument("--use_hpu_graphs", default=False, action="store_true")
+    parser.add_argument("--device", type=str, default="cpu")
+    parser.add_argument("--token", type=str, default=None)
+    parser.add_argument("--seed", type=int, default=42)
+    parser.add_argument("--bf16", action="store_true")
+
+    args = parser.parse_args()
+    # Instantiate the image-to-image component and register it to the controller
+    opea_imagetoimage = OpeaImageToImage(
+        name="OpeaImageToImage",
+        description="OPEA Image To Image Service",
+        seed=args.seed,
+        model_name_or_path=args.model_name_or_path,
+        device=args.device,
+        token=args.token,
+        bf16=args.bf16,
+        use_hpu_graphs=args.use_hpu_graphs,
+    )
+
+    controller.register(opea_imagetoimage)
+
+    # Discover and activate a healthy component
+    controller.discover_and_activate()
+    logger.info("Image2image server started.")
+    opea_microservices["opea_service@image2image"].start()
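The `register` / `discover_and_activate` / `invoke` sequence above is OPEA's component-controller pattern. For readers unfamiliar with it, here is a simplified, hypothetical sketch of the dispatch logic, not the actual `OpeaComponentController` implementation that ships in the `comps` package:

```python
# Simplified, hypothetical sketch of the controller pattern used above.
class MiniController:
    def __init__(self):
        self.components = {}
        self.active_component = None

    def register(self, component):
        # Each component registers under its unique name.
        if component.name in self.components:
            raise ValueError(f"Component {component.name} is already registered.")
        self.components[component.name] = component

    def discover_and_activate(self):
        # Activate the first registered component whose health check passes.
        for component in self.components.values():
            if component.check_health():
                self.active_component = component
                return
        raise RuntimeError("No healthy component found.")

    def invoke(self, *args, **kwargs):
        # Route every request to the active component (OpeaImageToImage here).
        return self.active_component.invoke(*args, **kwargs)
```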
@@ -10,7 +10,7 @@ ip_address=$(hostname -I | awk '{print $1}')
 function build_docker_images() {
     cd $WORKPATH
     echo $(pwd)
-    docker build --no-cache -t opea/image2image:latest -f comps/image2image/Dockerfile .
+    docker build --no-cache -t opea/image2image:latest -f comps/image2image/src/Dockerfile .
     if [ $? -ne 0 ]; then
         echo "opea/image2image built fail"
         exit 1