Refactor example code (#183)
Signed-off-by: lvliang-intel <liang1.lv@intel.com>
Signed-off-by: Yue, Wenjiao <wenjiao.yue@intel.com>
Signed-off-by: chensuyue <suyue.chen@intel.com>
@@ -31,7 +31,7 @@ RUN cd /home/user/ && \
 RUN cd /home/user/GenAIComps && pip install --no-cache-dir --upgrade pip && \
     pip install -r /home/user/GenAIComps/requirements.txt
 
-COPY ../code_translation.py /home/user/code_translation.py
+COPY ./code_translation.py /home/user/code_translation.py
 
 ENV PYTHONPATH=$PYTHONPATH:/home/user/GenAIComps
 
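The `COPY` fix above is worth a note: Docker cannot copy sources from outside the build context, so `../code_translation.py` fails once the build runs from the directory holding the Dockerfile. After this refactor the script sits next to the Dockerfile and the build is issued from `CodeTrans/`, as the README hunks below show. A minimal sketch:

```bash
# Build from the directory containing both the Dockerfile and
# code_translation.py, so `COPY ./code_translation.py` resolves
# inside the build context.
cd GenAIExamples/CodeTrans
docker build -t opea/gen-ai-comps:codetrans-megaservice-server -f Dockerfile .
```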
@@ -14,8 +14,8 @@ The Code Translation service can be effortlessly deployed on either Intel Gaudi2
 
 ## Deploy Code Translation on Gaudi
 
-Refer to the [Gaudi Guide](./microservice/gaudi/README.md) for instructions on deploying Code Translation on Gaudi.
+Refer to the [Gaudi Guide](./docker-composer/gaudi/README.md) for instructions on deploying Code Translation on Gaudi.
 
 ## Deploy Code Translation on Xeon
 
-Refer to the [Xeon Guide](./microservice/xeon/README.md) for instructions on deploying Code Translation on Xeon.
+Refer to the [Xeon Guide](./docker-composer/xeon/README.md) for instructions on deploying Code Translation on Xeon.
@@ -18,17 +18,25 @@ import os
 
 from comps import CodeTransGateway, MicroService, ServiceOrchestrator
 
-SERVICE_HOST_IP = os.getenv("MEGA_SERVICE_HOST_IP", "0.0.0.0")
+MEGA_SERVICE_HOST_IP = os.getenv("MEGA_SERVICE_HOST_IP", "0.0.0.0")
+MEGA_SERVICE_PORT = os.getenv("MEGA_SERVICE_PORT", 7777)
+LLM_SERVICE_HOST_IP = os.getenv("LLM_SERVICE_HOST_IP", "0.0.0.0")
+LLM_SERVICE_PORT = os.getenv("LLM_SERVICE_PORT", 9000)
 
 
-class MyServiceOrchestrator:
-    def __init__(self, port=8000):
+class CodeTransService:
+    def __init__(self, host="0.0.0.0", port=8000):
+        self.host = host
         self.port = port
         self.megaservice = ServiceOrchestrator()
 
     def add_remote_service(self):
         llm = MicroService(
-            name="llm", host=SERVICE_HOST_IP, port=9000, endpoint="/v1/chat/completions", use_remote_service=True
+            name="llm",
+            host=LLM_SERVICE_HOST_IP,
+            port=LLM_SERVICE_PORT,
+            endpoint="/v1/chat/completions",
+            use_remote_service=True,
         )
         self.megaservice.add(llm)
         self.gateway = CodeTransGateway(megaservice=self.megaservice, host="0.0.0.0", port=self.port)
@@ -55,6 +63,6 @@ class MyServiceOrchestrator:
 
 
 if __name__ == "__main__":
-    service_ochestrator = MyServiceOrchestrator(port=7777)
+    service_ochestrator = CodeTransService(host=MEGA_SERVICE_HOST_IP, port=MEGA_SERVICE_PORT)
     service_ochestrator.add_remote_service()
     asyncio.run(service_ochestrator.schedule())
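The refactor replaces the single hard-coded `SERVICE_HOST_IP` with four environment variables, so the megaservice and the LLM microservice can live on different hosts without touching code. A minimal sketch of running the script directly (the addresses are illustrative, not part of the commit):

```bash
# Defaults fall back to 0.0.0.0 with ports 7777/9000, matching the code above.
export MEGA_SERVICE_HOST_IP=192.168.0.10   # illustrative address
export MEGA_SERVICE_PORT=7777
export LLM_SERVICE_HOST_IP=192.168.0.11    # illustrative address
export LLM_SERVICE_PORT=9000
python code_translation.py
```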
CodeTrans/codetrans.yaml (new file, 53 lines)
@@ -0,0 +1,53 @@
+# Copyright (c) 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+opea_micro_services:
+  tgi_service:
+    host: ${TGI_SERVICE_IP}
+    ports: ${TGI_SERVICE_PORT}
+    image: ghcr.io/huggingface/tgi-gaudi:1.2.1
+    volumes:
+      - "./data:/data"
+    runtime: habana
+    cap_add:
+      - SYS_NICE
+    ipc: host
+    environment:
+      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      HABANA_VISIBLE_DEVICES: all
+      OMPI_MCA_btl_vader_single_copy_mechanism: none
+    model-id: ${LLM_MODEL_ID}
+  llm:
+    host: ${LLM_SERVICE_HOST_IP}
+    ports: ${LLM_SERVICE_PORT}
+    image: opea/gen-ai-comps:llm-tgi-gaudi-server
+    endpoint: /v1/chat/completions
+    environment:
+      TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+  ui:
+    host: ${UI_SERVICE_HOST_IP}
+    ports:
+      - "5173:5173"
+    environment:
+      - CHAT_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
+    image: opea/gen-ai-comps:codetrans-ui-server
+
+opea_mega_service:
+  host: ${MEGA_SERVICE_HOST_IP}
+  ports: ${MEGA_SERVICE_PORT}
+  endpoint: /v1/codetrans
+  image: opea/gen-ai-comps:codetrans-ui-server
+  mega_flow:
+    - llm
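Everything in this file is a `${...}` placeholder, so the deployment only becomes concrete once the variables are exported (the README hunks below list them). Assuming the placeholders follow plain shell-style substitution, `envsubst` from GNU gettext gives a quick preview of the resolved config:

```bash
# Preview codetrans.yaml with the current environment substituted in;
# assumes the ${VAR} placeholders use shell-style substitution.
export LLM_MODEL_ID="HuggingFaceH4/mistral-7b-grok"   # value from the README below
envsubst < CodeTrans/codetrans.yaml | grep -v '^#'
```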
@@ -16,15 +16,15 @@ cd GenAIComps
 ### 2. Build the LLM Docker Image with the following command
 
 ```bash
-docker build -t opea/gen-ai-comps:llm-tgi-gaudi-server --no-cache --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/langchain/docker/Dockerfile .
+docker build -t opea/gen-ai-comps:llm-tgi-gaudi-server --no-cache --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
 ```
 
 ### 3. Build MegaService Docker Image
 
 ```bash
 git clone https://github.com/opea-project/GenAIExamples.git
-cd GenAIExamples/CodeTrans/microservice/gaudi/
-docker build -t opea/gen-ai-comps:codetrans-megaservice-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile .
+cd GenAIExamples/CodeTrans
+docker build -t opea/gen-ai-comps:codetrans-megaservice-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
 ```
 
 ### 4. Build UI Docker Image
@@ -53,12 +53,14 @@ export LLM_MODEL_ID="HuggingFaceH4/mistral-7b-grok"
 export TGI_LLM_ENDPOINT="http://${host_ip}:8008"
 export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
+export MEGA_SERVICE_HOST_IP=${host_ip}
+export LLM_SERVICE_HOST_IP=${host_ip}
 export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:7777/v1/codetrans"
 ```
 
 ### Start Microservice Docker Containers
 
 ```bash
 cd GenAIExamples/CodeTrans/docker-composer/gaudi
 docker compose -f docker_compose.yaml up -d
 ```
 
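Once the containers are up, the megaservice listens behind the `BACKEND_SERVICE_ENDPOINT` exported above. A smoke test against the `/v1/codetrans` route could look like the following; the request fields mirror the translation prompt in the removed example code further down and are an assumption, not something this diff specifies:

```bash
# Hypothetical smoke test; the exact request schema is not shown in this diff.
curl http://${host_ip}:7777/v1/codetrans \
    -H "Content-Type: application/json" \
    -d '{"language_from": "Golang", "language_to": "Python", "source_code": "package main\n\nimport \"fmt\"\nfunc main() { fmt.Println(\"Hello, World!\") }"}'
```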
@@ -56,6 +56,7 @@ services:
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
       - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
+      - LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP}
     ipc: host
     restart: always
   codetrans-gaudi-ui-server:
@@ -24,15 +24,15 @@ cd GenAIComps
 ### 2. Build the LLM Docker Image with the following command
 
 ```bash
-docker build -t opea/gen-ai-comps:llm-tgi-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/langchain/docker/Dockerfile .
+docker build -t opea/gen-ai-comps:llm-tgi-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
 ```
 
 ### 3. Build MegaService Docker Image
 
 ```bash
 git clone https://github.com/opea-project/GenAIExamples.git
-cd GenAIExamples/CodeTrans/microservice/xeon/
-docker build -t opea/gen-ai-comps:codetrans-megaservice-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile .
+cd GenAIExamples/CodeTrans
+docker build -t opea/gen-ai-comps:codetrans-megaservice-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
 ```
 
 ### 4. Build UI Docker Image
@@ -61,12 +61,14 @@ export LLM_MODEL_ID="HuggingFaceH4/mistral-7b-grok"
 export TGI_LLM_ENDPOINT="http://${host_ip}:8008"
 export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
+export MEGA_SERVICE_HOST_IP=${host_ip}
+export LLM_SERVICE_HOST_IP=${host_ip}
 export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:7777/v1/codetrans"
 ```
 
 ### Start Microservice Docker Containers
 
 ```bash
 cd GenAIExamples/CodeTrans/docker-composer/xeon
 docker compose -f docker_compose.yaml up -d
 ```
 
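The same smoke test shown in the Gaudi section applies here. Before issuing it, it can help to confirm the services reached a running state; `codetrans-xeon-ui-server` below is one of the service names visible in the compose hunk that follows:

```bash
# Check container state, then tail the UI service logs while testing.
docker compose -f docker_compose.yaml ps
docker compose -f docker_compose.yaml logs codetrans-xeon-ui-server
```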
@@ -51,6 +51,7 @@ services:
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
       - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
+      - LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP}
     ipc: host
     restart: always
   codetrans-xeon-ui-server:
CodeTrans/kubernetes/README.md (new file, 0 lines)
CodeTrans/kubernetes/helm-charts/README.md (new file, 0 lines)
CodeTrans/kubernetes/manifests/README.md (new file, 0 lines)
CodeTrans/kubernetes/service-mesh/README.md (new file, 0 lines)
@@ -1,60 +0,0 @@
-# Copyright (c) 2024 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import asyncio
-import os
-
-from comps import CodeTransGateway, MicroService, ServiceOrchestrator
-
-SERVICE_HOST_IP = os.getenv("MEGA_SERVICE_HOST_IP", "0.0.0.0")
-
-
-class MyServiceOrchestrator:
-    def __init__(self, port=8000):
-        self.port = port
-        self.megaservice = ServiceOrchestrator()
-
-    def add_remote_service(self):
-        llm = MicroService(
-            name="llm", host=SERVICE_HOST_IP, port=9000, endpoint="/v1/chat/completions", use_remote_service=True
-        )
-        self.megaservice.add(llm)
-        self.gateway = CodeTransGateway(megaservice=self.megaservice, host="0.0.0.0", port=self.port)
-
-    async def schedule(self):
-        await self.megaservice.schedule(
-            initial_inputs={
-                "query": """
-### System: Please translate the following Golang codes into Python codes.
-
-### Original codes:
-'''Golang
-
-\npackage main\n\nimport \"fmt\"\nfunc main() {\n    fmt.Println(\"Hello, World!\");\n
-
-'''
-
-### Translated codes:
-"""
-            }
-        )
-        result_dict = self.megaservice.result_dict
-        print(result_dict)
-
-
-if __name__ == "__main__":
-    service_ochestrator = MyServiceOrchestrator(port=7777)
-    service_ochestrator.add_remote_service()
-    asyncio.run(service_ochestrator.schedule())
@@ -1,42 +0,0 @@
-# Copyright (c) 2024 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-FROM python:3.11-slim
-
-RUN apt-get update -y && apt-get install -y --no-install-recommends --fix-missing \
-    libgl1-mesa-glx \
-    libjemalloc-dev \
-    vim \
-    git
-
-RUN useradd -m -s /bin/bash user && \
-    mkdir -p /home/user && \
-    chown -R user /home/user/
-
-RUN cd /home/user/ && \
-    git clone https://github.com/opea-project/GenAIComps.git
-
-RUN cd /home/user/GenAIComps && pip install --no-cache-dir --upgrade pip && \
-    pip install -r /home/user/GenAIComps/requirements.txt
-
-COPY ../code_translation.py /home/user/code_translation.py
-
-ENV PYTHONPATH=$PYTHONPATH:/home/user/GenAIComps
-
-USER user
-
-WORKDIR /home/user
-
-ENTRYPOINT ["python", "code_translation.py"]
@@ -13,12 +13,12 @@ function build_docker_images() {
     git clone https://github.com/opea-project/GenAIComps.git
     cd GenAIComps
 
-    docker build -t opea/gen-ai-comps:llm-tgi-gaudi-server -f comps/llms/langchain/docker/Dockerfile .
+    docker build -t opea/gen-ai-comps:llm-tgi-gaudi-server -f comps/llms/text-generation/tgi/Dockerfile .
 
     docker pull ghcr.io/huggingface/tgi-gaudi:1.2.1
 
-    cd $WORKPATH/microservice/gaudi
-    docker build --no-cache -t opea/gen-ai-comps:codetrans-megaservice-server -f docker/Dockerfile .
+    cd $WORKPATH
+    docker build --no-cache -t opea/gen-ai-comps:codetrans-megaservice-server -f Dockerfile .
 
     cd $WORKPATH/ui
     docker build --no-cache -t opea/gen-ai-comps:codetrans-ui-server -f docker/Dockerfile .
@@ -27,7 +27,7 @@ function build_docker_images() {
 }
 
 function start_services() {
-    cd $WORKPATH/microservice/gaudi
+    cd $WORKPATH/docker-composer/gaudi
 
     export http_proxy=${http_proxy}
     export https_proxy=${http_proxy}
@@ -35,6 +35,7 @@ function start_services() {
     export TGI_LLM_ENDPOINT="http://${ip_address}:8008"
     export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
     export MEGA_SERVICE_HOST_IP=${ip_address}
+    export LLM_SERVICE_HOST_IP=${ip_address}
    export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:7777/v1/codetrans"
 
     # Start Docker Containers
@@ -96,7 +97,7 @@ function validate_megaservice() {
 }
 
 function stop_docker() {
-    cd $WORKPATH/microservice/gaudi
+    cd $WORKPATH/docker-composer/gaudi
     container_list=$(cat docker_compose.yaml | grep container_name | cut -d':' -f2)
     for container_name in $container_list; do
         cid=$(docker ps -aq --filter "name=$container_name")
@@ -14,10 +14,10 @@ function build_docker_images() {
     git clone https://github.com/opea-project/GenAIComps.git
     cd GenAIComps
 
-    docker build -t opea/gen-ai-comps:llm-tgi-server -f comps/llms/langchain/docker/Dockerfile .
+    docker build -t opea/gen-ai-comps:llm-tgi-server -f comps/llms/text-generation/tgi/Dockerfile .
 
-    cd $WORKPATH/microservice/xeon
-    docker build --no-cache -t opea/gen-ai-comps:codetrans-megaservice-server -f docker/Dockerfile .
+    cd $WORKPATH
+    docker build --no-cache -t opea/gen-ai-comps:codetrans-megaservice-server -f Dockerfile .
 
     cd $WORKPATH/ui
     docker build --no-cache -t opea/gen-ai-comps:codetrans-ui-server -f docker/Dockerfile .
@@ -26,20 +26,21 @@ function build_docker_images() {
 }
 
 function start_services() {
-    cd $WORKPATH/microservice/xeon
+    cd $WORKPATH/docker-composer/xeon
     export http_proxy=${http_proxy}
     export https_proxy=${http_proxy}
     export LLM_MODEL_ID="HuggingFaceH4/mistral-7b-grok"
     export TGI_LLM_ENDPOINT="http://${ip_address}:8008"
     export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
     export MEGA_SERVICE_HOST_IP=${ip_address}
+    export LLM_SERVICE_HOST_IP=${ip_address}
     export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:7777/v1/codetrans"
 
     # Start Docker Containers
     # TODO: Replace the container name with a test-specific name
     docker compose -f docker_compose.yaml up -d
 
-    sleep 1m
+    sleep 2m
 }
 
 function validate_microservices() {
@@ -80,7 +81,7 @@ function validate_megaservice() {
 }
 
 function stop_docker() {
-    cd $WORKPATH/microservice/xeon
+    cd $WORKPATH/docker-composer/xeon
     container_list=$(cat docker_compose.yaml | grep container_name | cut -d':' -f2)
     for container_name in $container_list; do
         cid=$(docker ps -aq --filter "name=$container_name")
@@ -3,24 +3,13 @@
   "version": "0.0.1",
   "scripts": {
     "dev": "vite dev",
-    "build": "vite build && npm run package",
+    "build": "vite build",
     "preview": "vite preview",
-    "package": "svelte-kit sync && svelte-package && publint",
-    "prepublishOnly": "npm run package",
     "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
     "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch"
   },
-  "exports": {
-    ".": {
-      "types": "./dist/index.d.ts",
-      "svelte": "./dist/index.js"
-    }
-  },
-  "files": [
-    "dist",
-    "!dist/**/*.test.*",
-    "!dist/**/*.spec.*"
-  ],
   "peerDependencies": {
     "svelte": "^4.0.0"
   },
@@ -45,8 +34,6 @@
     "typescript": "^5.0.0",
     "vite": "^5.0.11"
   },
-  "svelte": "./dist/index.js",
-  "types": "./dist/index.d.ts",
   "type": "module",
   "dependencies": {
     "prismjs": "^1.29.0",
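Dropping `svelte-package`, `publint`, `exports`, and `files` turns the UI from a publishable Svelte library into a plain SvelteKit app, so the image build only needs the compiled output. Assuming the usual npm workflow, the UI now builds with:

```bash
# Build the UI as an application rather than a packaged library.
npm install
npm run build      # now runs plain `vite build`
npm run preview    # optional local check of the production build
```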