diff --git a/CodeGen/assets/img/ui-result-page.png b/CodeGen/assets/img/ui-result-page.png
new file mode 100644
index 000000000..a9f49cef6
Binary files /dev/null and b/CodeGen/assets/img/ui-result-page.png differ
diff --git a/CodeGen/assets/img/ui-starting-page.png b/CodeGen/assets/img/ui-starting-page.png
new file mode 100644
index 000000000..61922b5eb
Binary files /dev/null and b/CodeGen/assets/img/ui-starting-page.png differ
diff --git a/CodeGen/docker_compose/amd/gpu/rocm/README.md b/CodeGen/docker_compose/amd/gpu/rocm/README.md
index d8c6a4c88..c1a6a7717 100644
--- a/CodeGen/docker_compose/amd/gpu/rocm/README.md
+++ b/CodeGen/docker_compose/amd/gpu/rocm/README.md
@@ -1,47 +1,117 @@
-# Build and deploy CodeGen Application on AMD GPU (ROCm)
+# Build and Deploy CodeGen Application on AMD GPU (ROCm)
 
-## Build images
+## Build Docker Images
 
-### Build the LLM Docker Image
+### 1. Build Docker Images
 
-```bash
-### Cloning repo
-git clone https://github.com/opea-project/GenAIComps.git
-cd GenAIComps
+- #### Create the application install directory and go to it:
 
-### Build Docker image
-docker build -t opea/llm-textgen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
-```
+  ```bash
+  mkdir ~/codegen-install && cd ~/codegen-install
+  ```
 
-### Build the MegaService Docker Image
+- #### Clone the GenAIExamples repository (the default branch "main" is used here):
 
-```bash
-### Cloning repo
-git clone https://github.com/opea-project/GenAIExamples
-cd GenAIExamples/CodeGen
+  ```bash
+  git clone https://github.com/opea-project/GenAIExamples.git
+  ```
 
-### Build Docker image
-docker build -t opea/codegen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
-```
+  If you need a specific branch or tag of the GenAIExamples repository, check it out instead (replace v1.3 with the required value):
 
-### Build the UI Docker Image
+  ```bash
+  git clone https://github.com/opea-project/GenAIExamples.git && cd GenAIExamples && git checkout v1.3
+  ```
 
-```bash
-cd GenAIExamples/CodeGen/ui
-### Build UI Docker image
-docker build -t opea/codegen-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
+  Note that when using a specific version of the code, you should also use the README from that version.
 
-### Build React UI Docker image (React UI allows you to use file uploads)
-docker build --no-cache -t opea/codegen-react-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile.react .
-```
+- #### Go to the build directory:
 
-It is recommended to use the React UI as it works for downloading files. The use of React UI is set in the Docker Compose file
+  ```bash
+  cd ~/codegen-install/GenAIExamples/CodeGen/docker_image_build
+  ```
 
-## Deploy CodeGen Application
+- Clean up the GenAIComps repository if it was previously cloned into this directory.
+  This is necessary if a build was performed earlier and the GenAIComps folder exists and is not empty:
 
-### Features of Docker compose for AMD GPUs
+  ```bash
+  rm -rf GenAIComps
+  ```
 
-1. Added forwarding of GPU devices to the container TGI service with instructions:
+- #### Clone the GenAIComps repository (the default branch "main" is used here):
+
+  ```bash
+  git clone https://github.com/opea-project/GenAIComps.git
+  ```
+
+  If you use a specific tag of the GenAIExamples repository,
+  then you should also use the corresponding tag for GenAIComps
+  (replace v1.3 with the required value):
+
+  ```bash
+  git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout v1.3
+  ```
+
+  As noted above, when using a specific version of the code, use the README from that version.
+
+- #### Set the list of images to build (from build.yaml)
+
+  Depending on whether you want to deploy the vLLM-based or the TGI-based application, set the list of services to build as follows:
+
+  #### vLLM-based application
+
+  ```bash
+  service_list="vllm-rocm llm-textgen codegen codegen-ui"
+  ```
+
+  #### TGI-based application
+
+  ```bash
+  service_list="llm-textgen codegen codegen-ui"
+  ```
+
+- #### Optional: Pull the TGI Docker Image (only needed if you want to use TGI)
+
+  ```bash
+  docker pull ghcr.io/huggingface/text-generation-inference:2.3.1-rocm
+  ```
+
+- #### Build Docker Images
+
+  ```bash
+  docker compose -f build.yaml build ${service_list} --no-cache
+  ```
+
+  After the build completes, check the list of images:
+
+  ```bash
+  docker image ls
+  ```
+
+  The list of images should include:
+
+  ##### vLLM-based application:
+
+  - opea/vllm-rocm:latest
+  - opea/llm-textgen:latest
+  - opea/codegen:latest
+  - opea/codegen-ui:latest
+
+  ##### TGI-based application:
+
+  - ghcr.io/huggingface/text-generation-inference:2.3.1-rocm
+  - opea/llm-textgen:latest
+  - opea/codegen:latest
+  - opea/codegen-ui:latest
+
+---
+
+## Deploy the CodeGen Application
+
+### Docker Compose Configuration for AMD GPUs
+
+To enable GPU support on AMD GPUs, the following configuration is added to the Docker Compose files:
+
+- compose_vllm.yaml - for the vLLM-based application
+- compose.yaml - for the TGI-based application
 
 ```yaml
 shm_size: 1g
@@ -56,9 +126,7 @@ security_opt:
   - seccomp:unconfined
 ```
 
-In this case, all GPUs are thrown. To reset a specific GPU, you need to use specific device names cardN and renderN.
-
-For example:
+This configuration forwards all available GPUs to the container. To use a specific GPU, specify its `cardN` and `renderN` device IDs. For example:
 
 ```yaml
 shm_size: 1g
@@ -74,53 +142,284 @@ security_opt:
   - seccomp:unconfined
 ```
 
-To find out which GPU device IDs cardN and renderN correspond to the same GPU, use the GPU driver utility
+**How to Identify GPU Device IDs:**
+Use AMD GPU driver utilities to determine the correct `cardN` and `renderN` IDs for your GPU.
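+
+For example, you can list the DRM device nodes and cross-check them against `rocm-smi`. The node numbering shown in the comments below is only illustrative; it differs from system to system:
+
+```bash
+# List the card and render nodes exposed by the GPU driver
+ls -l /dev/dri
+# Typical nodes: card0, card1, ..., renderD128, renderD129, ...
+
+# Show the GPUs visible to ROCm (requires the ROCm tools to be installed on the host)
+rocm-smi
+```
+
+The names listed under `/dev/dri` are the ones to map in the `devices:` section of the compose file.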
 
-### Go to the directory with the Docker compose file
+### Set the deployment environment variables
+
+#### Setting variables in the operating system environment:
+
+##### Set the HUGGINGFACEHUB_API_TOKEN variable:
 
 ```bash
-cd GenAIExamples/CodeGen/docker_compose/amd/gpu/rocm
+### Replace 'your_huggingfacehub_token' with your Hugging Face Hub repository access token.
+export HUGGINGFACEHUB_API_TOKEN='your_huggingfacehub_token'
 ```
 
-### Set environments
+#### Set variable values in the `set_env*.sh` file:
 
-In the file "GenAIExamples/CodeGen/docker_compose/amd/gpu/rocm/set_env.sh " it is necessary to set the required values. Parameter assignments are specified in the comments for each variable setting command
+Go to the Docker Compose directory:
+
+```bash
+cd ~/codegen-install/GenAIExamples/CodeGen/docker_compose/amd/gpu/rocm
+```
+
+The examples below use the nano text editor; you can use any convenient text editor:
+
+#### If you use vLLM
+
+```bash
+nano set_env_vllm.sh
+```
+
+#### If you use TGI
+
+```bash
+nano set_env.sh
+```
+
+If you are in a proxy environment, also set the proxy-related environment variables:
+
+```bash
+export http_proxy="Your_HTTP_Proxy"
+export https_proxy="Your_HTTPS_Proxy"
+```
+
+Set the values of the variables:
+
+- **HOST_IP, EXTERNAL_HOST_IP** - These variables configure the name/address under which the application services reach each other and are reached from the outside world.
+
+  If your server uses only an internal address and is not accessible from the Internet, both variables have the same value: the server's internal name/address.
+
+  If your server uses only an external, Internet-accessible address, both variables also have the same value: the server's external name/address.
+
+  If your server is located on an internal network, has an internal address, but is accessible from the Internet via a proxy/firewall/load balancer, then HOST_IP is the internal name/address of the server, and EXTERNAL_HOST_IP is the external name/address of the proxy/firewall/load balancer in front of it.
+
+  Set these values in the `set_env*.sh` file.
+
+- **Variables with names like `*_PORT`** - These variables set the IP port numbers used for network connections to the application services.
+  The values shipped in `set_env.sh` and `set_env_vllm.sh` are the ones used during development and testing of the application and match the environment in which that development was done. Adjust them according to the network-access rules of your own server, and make sure they do not overlap with the IP ports of other applications already in use.
+
+#### Set the variables with the `set_env*.sh` script
+
+#### If you use vLLM
+
+```bash
+. set_env_vllm.sh
+```
+
+#### If you use TGI
 
 ```bash
-chmod +x set_env.sh
 . set_env.sh
 ```
 
-### Run services
+### Start the Services
 
-```
-docker compose up -d
-```
-
-# Validate the MicroServices and MegaService
-
-## Validate TGI service
+#### If you use vLLM
 
 ```bash
+docker compose -f compose_vllm.yaml up -d
+```
+
+#### If you use TGI
+
+```bash
+docker compose -f compose.yaml up -d
+```
+
+All of the following containers should be running and should not keep restarting:
+
+##### If you use vLLM:
+
+- codegen-vllm-service
+- codegen-llm-server
+- codegen-backend-server
+- codegen-ui-server
+
+##### If you use TGI:
+
+- codegen-tgi-service
+- codegen-llm-server
+- codegen-backend-server
+- codegen-ui-server
+
+---
+
+## Validate the Services
+
+### 1. Validate the vLLM/TGI Service
+
+#### If you use vLLM:
+
+```bash
+DATA='{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", '\
+'"messages": [{"role": "user", "content": "Implement a high-level API for a TODO list application. '\
+'The API takes as input an operation request and updates the TODO list in place. '\
+'If the request is invalid, raise an exception."}], "max_tokens": 256}'
+
+curl http://${HOST_IP}:${CODEGEN_VLLM_SERVICE_PORT}/v1/chat/completions \
+  -X POST \
+  -d "$DATA" \
+  -H 'Content-Type: application/json'
+```
+
+Check the response from the service.
The response should be similar to JSON: + +````json +{ + "id": "chatcmpl-142f34ef35b64a8db3deedd170fed951", + "object": "chat.completion", + "created": 1742270316, + "model": "Qwen/Qwen2.5-Coder-7B-Instruct", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "```python\nfrom typing import Optional, List, Dict, Union\nfrom pydantic import BaseModel, validator\n\nclass OperationRequest(BaseModel):\n # Assuming OperationRequest is already defined as per the given text\n pass\n\nclass UpdateOperation(OperationRequest):\n new_items: List[str]\n\n def apply_and_maybe_raise(self, updatable_item: \"Updatable todo list\") -> None:\n # Assuming updatable_item is an instance of Updatable todo list\n self.validate()\n updatable_item.add_items(self.new_items)\n\nclass Updatable:\n # Abstract class for items that can be updated\n pass\n\nclass TodoList(Updatable):\n # Class that represents a todo list\n items: List[str]\n\n def add_items(self, new_items: List[str]) -> None:\n self.items.extend(new_items)\n\ndef handle_request(operation_request: OperationRequest) -> None:\n # Function to handle an operation request\n if isinstance(operation_request, UpdateOperation):\n operation_request.apply_and_maybe_raise(get_todo_list_for_update())\n else:\n raise ValueError(\"Invalid operation request\")\n\ndef get_todo_list_for_update() -> TodoList:\n # Function to get the todo list for update\n # Assuming this function returns the", + "tool_calls": [] + }, + "logprobs": null, + "finish_reason": "length", + "stop_reason": null + } + ], + "usage": { "prompt_tokens": 66, "total_tokens": 322, "completion_tokens": 256, "prompt_tokens_details": null }, + "prompt_logprobs": null +} +```` + +If the service response has a meaningful response in the value of the "choices.message.content" key, +then we consider the vLLM service to be successfully launched + +#### If you use TGI: + +```bash +DATA='{"inputs":"Implement a high-level API for a TODO list application. '\ +'The API takes as input an operation request and updates the TODO list in place. '\ +'If the request is invalid, raise an exception.",'\ +'"parameters":{"max_new_tokens":256,"do_sample": true}}' + curl http://${HOST_IP}:${CODEGEN_TGI_SERVICE_PORT}/generate \ -X POST \ - -d '{"inputs":"Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception.","parameters":{"max_new_tokens":256, "do_sample": true}}' \ + -d "$DATA" \ -H 'Content-Type: application/json' ``` -## Validate LLM service +Checking the response from the service. The response should be similar to JSON: + +````json +{ + "generated_text": " The supported operations are \"add_task\", \"complete_task\", and \"remove_task\". Each operation can be defined with a corresponding function in the API.\n\nAdd your API in the following format:\n\n```\nTODO App API\n\nsupported operations:\n\noperation name description\n----------------------- ------------------------------------------------\n \n```\n\nUse type hints for function parameters and return values. 
Specify a text description of the API's supported operations.\n\nUse the following code snippet as a starting point for your high-level API function:\n\n```\nclass TodoAPI:\n def __init__(self, tasks: List[str]):\n self.tasks = tasks # List of tasks to manage\n\n def add_task(self, task: str) -> None:\n self.tasks.append(task)\n\n def complete_task(self, task: str) -> None:\n self.tasks = [t for t in self.tasks if t != task]\n\n def remove_task(self, task: str) -> None:\n self.tasks = [t for t in self.tasks if t != task]\n\n def handle_request(self, request: Dict[str, str]) -> None:\n operation = request.get('operation')\n if operation == 'add_task':\n self.add_task(request.get('task'))\n elif" +} +```` + +If the service response has a meaningful response in the value of the "generated_text" key, +then we consider the TGI service to be successfully launched + +### 2. Validate the LLM Service ```bash -curl http://${HOST_IP}:${CODEGEN_LLM_SERVICE_PORT}/v1/chat/completions\ +DATA='{"query":"Implement a high-level API for a TODO list application. '\ +'The API takes as input an operation request and updates the TODO list in place. '\ +'If the request is invalid, raise an exception.",'\ +'"max_tokens":256,"top_k":10,"top_p":0.95,"typical_p":0.95,"temperature":0.01,'\ +'"repetition_penalty":1.03,"stream":false}' + +curl http://${HOST_IP}:${CODEGEN_LLM_SERVICE_PORT}/v1/chat/completions \ -X POST \ - -d '{"query":"Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception.","max_tokens":256,"top_k":10,"top_p":0.95,"typical_p":0.95,"temperature":0.01,"repetition_penalty":1.03,"stream":true}' \ + -d "$DATA" \ -H 'Content-Type: application/json' ``` -## Validate MegaService +Checking the response from the service. The response should be similar to JSON: + +````json +{ + "id": "cmpl-4e89a590b1af46bfb37ce8f12b2996f8", + "choices": [ + { + "finish_reason": "length", + "index": 0, + "logprobs": null, + "text": " The API should support the following operations:\n\n1. Add a new task to the TODO list.\n2. Remove a task from the TODO list.\n3. Mark a task as completed.\n4. Retrieve the list of all tasks.\n\nThe API should also support the following features:\n\n1. The ability to filter tasks based on their completion status.\n2. The ability to sort tasks based on their priority.\n3. The ability to search for tasks based on their description.\n\nHere is an example of how the API can be used:\n\n```python\ntodo_list = []\napi = TodoListAPI(todo_list)\n\n# Add tasks\napi.add_task(\"Buy groceries\")\napi.add_task(\"Finish homework\")\n\n# Mark a task as completed\napi.mark_task_completed(\"Buy groceries\")\n\n# Retrieve the list of all tasks\nprint(api.get_all_tasks())\n\n# Filter tasks based on completion status\nprint(api.filter_tasks(completed=True))\n\n# Sort tasks based on priority\napi.sort_tasks(priority=\"high\")\n\n# Search for tasks based on description\nprint(api.search_tasks(description=\"homework\"))\n```\n\nIn this example, the `TodoListAPI` class is used to manage the TODO list. 
The `add_task` method adds a new task to the list, the `mark_task_completed` method", + "stop_reason": null, + "prompt_logprobs": null + } + ], + "created": 1742270567, + "model": "Qwen/Qwen2.5-Coder-7B-Instruct", + "object": "text_completion", + "system_fingerprint": null, + "usage": { + "completion_tokens": 256, + "prompt_tokens": 37, + "total_tokens": 293, + "completion_tokens_details": null, + "prompt_tokens_details": null + } +} +```` + +If the service response has a meaningful response in the value of the "choices.text" key, +then we consider the vLLM service to be successfully launched + +### 3. Validate the MegaService ```bash -curl http://${HOST_IP}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen -H "Content-Type: application/json" -d '{ - "messages": "Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception." - }' +DATA='{"messages": "Implement a high-level API for a TODO list application. '\ +'The API takes as input an operation request and updates the TODO list in place. '\ +'If the request is invalid, raise an exception."}' + +curl http://${HOST_IP}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen \ + -H "Content-Type: application/json" \ + -d "$DATA" +``` + +Checking the response from the service. The response should be similar to text: + +```textmate +data: {"id":"cmpl-cc5dc73819c640469f7c7c7424fe57e6","choices":[{"finish_reason":null,"index":0,"logprobs":null,"text":" of","stop_reason":null}],"created":1742270725,"model":"Qwen/Qwen2.5-Coder-7B-Instruct","object":"text_completion","system_fingerprint":null,"usage":null} +........... +data: {"id":"cmpl-cc5dc73819c640469f7c7c7424fe57e6","choices":[{"finish_reason":null,"index":0,"logprobs":null,"text":" all","stop_reason":null}],"created":1742270725,"model":"Qwen/Qwen2.5-Coder-7B-Instruct","object":"text_completion","system_fingerprint":null,"usage":null} +data: {"id":"cmpl-cc5dc73819c640469f7c7c7424fe57e6","choices":[{"finish_reason":null,"index":0,"logprobs":null,"text":" tasks","stop_reason":null}],"created":1742270725,"model":"Qwen/Qwen2.5-Coder-7B-Instruct","object":"text_completion","system_fingerprint":null,"usage":null} +data: {"id":"cmpl-cc5dc73819c640469f7c7c7424fe57e6","choices":[{"finish_reason":"length","index":0,"logprobs":null,"text":",","stop_reason":null}],"created":1742270725,"model":"Qwen/Qwen2.5-Coder-7B-Instruct","object":"text_completion","system_fingerprint":null,"usage":null} +data: [DONE] +``` + +If the output lines in the "choices.text" keys contain words (tokens) containing meaning, then the service is considered launched successfully. + +### 4. Validate the Frontend (UI) + +To access the UI, use the URL - http://${EXTERNAL_HOST_IP}:${CODEGEN_UI_SERVICE_PORT} +A page should open when you click through to this address: + +![UI start page](../../../../assets/img/ui-starting-page.png) + +If a page of this type has opened, then we believe that the service is running and responding, +and we can proceed to functional UI testing. + +Let's enter the task for the service in the "Enter prompt here" field. +For example, "Write a Python code that returns the current time and date" and press Enter. +After that, a page with the result of the task should open: + +![UI result page](../../../../assets/img/ui-result-page.png) + +If the result shown on the page is correct, then we consider the verification of the UI service to be successful. + +### 5. 
Stop application + +#### If you use vLLM + +```bash +cd ~/codegen-install/GenAIExamples/CodeGen/docker_compose/amd/gpu/rocm +docker compose -f compose_vllm.yaml down +``` + +#### If you use TGI + +```bash +cd ~/codegen-install/GenAIExamples/CodeGen/docker_compose/amd/gpu/rocm +docker compose -f compose.yaml down ``` diff --git a/CodeGen/docker_compose/amd/gpu/rocm/compose.yaml b/CodeGen/docker_compose/amd/gpu/rocm/compose.yaml index b558697d8..5596284aa 100644 --- a/CodeGen/docker_compose/amd/gpu/rocm/compose.yaml +++ b/CodeGen/docker_compose/amd/gpu/rocm/compose.yaml @@ -1,4 +1,5 @@ # Copyright (C) 2024 Intel Corporation +# Copyright (c) 2024 Advanced Micro Devices, Inc. # SPDX-License-Identifier: Apache-2.0 services: @@ -15,9 +16,9 @@ services: https_proxy: ${https_proxy} HUGGING_FACE_HUB_TOKEN: ${CODEGEN_HUGGINGFACEHUB_API_TOKEN} HUGGINGFACEHUB_API_TOKEN: ${CODEGEN_HUGGINGFACEHUB_API_TOKEN} - host_ip: ${host_ip} + host_ip: ${HOST_IP} healthcheck: - test: ["CMD-SHELL", "curl -f http://$host_ip:${CODEGEN_TGI_SERVICE_PORT:-8028}/health || exit 1"] + test: ["CMD-SHELL", "curl -f http://${HOST_IP}:${CODEGEN_TGI_SERVICE_PORT:-8028}/health || exit 1"] interval: 10s timeout: 10s retries: 100 @@ -46,9 +47,11 @@ services: no_proxy: ${no_proxy} http_proxy: ${http_proxy} https_proxy: ${https_proxy} - LLM_ENDPOINT: "http://codegen-tgi-service" + LLM_ENDPOINT: ${CODEGEN_TGI_LLM_ENDPOINT} LLM_MODEL_ID: ${CODEGEN_LLM_MODEL_ID} HUGGINGFACEHUB_API_TOKEN: ${CODEGEN_HUGGINGFACEHUB_API_TOKEN} + HF_TOKEN: ${CODEGEN_HUGGINGFACEHUB_API_TOKEN} + LLM_COMPONENT_NAME: "OpeaTextGenService" restart: unless-stopped codegen-backend-server: image: ${REGISTRY:-opea}/codegen:${TAG:-latest} @@ -62,7 +65,8 @@ services: https_proxy: ${https_proxy} http_proxy: ${http_proxy} MEGA_SERVICE_HOST_IP: ${CODEGEN_MEGA_SERVICE_HOST_IP} - LLM_SERVICE_HOST_IP: "codegen-llm-server" + LLM_SERVICE_HOST_IP: ${HOST_IP} + LLM_SERVICE_PORT: ${CODEGEN_LLM_SERVICE_PORT} ipc: host restart: always codegen-ui-server: diff --git a/CodeGen/docker_compose/amd/gpu/rocm/compose_vllm.yaml b/CodeGen/docker_compose/amd/gpu/rocm/compose_vllm.yaml new file mode 100644 index 000000000..f63aca745 --- /dev/null +++ b/CodeGen/docker_compose/amd/gpu/rocm/compose_vllm.yaml @@ -0,0 +1,94 @@ +# Copyright (C) 2024 Intel Corporation +# Copyright (c) 2024 Advanced Micro Devices, Inc. 
+# SPDX-License-Identifier: Apache-2.0 + +services: + codegen-vllm-service: + image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest} + container_name: codegen-vllm-service + ports: + - "${CODEGEN_VLLM_SERVICE_PORT:-8081}:8011" + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + HUGGINGFACEHUB_API_TOKEN: ${CODEGEN_HUGGINGFACEHUB_API_TOKEN} + HF_TOKEN: ${CODEGEN_HUGGINGFACEHUB_API_TOKEN} + HF_HUB_DISABLE_PROGRESS_BARS: 1 + HF_HUB_ENABLE_HF_TRANSFER: 0 + WILM_USE_TRITON_FLASH_ATTENTION: 0 + PYTORCH_JIT: 0 + healthcheck: + test: [ "CMD-SHELL", "curl -f http://${HOST_IP}:${CODEGEN_VLLM_SERVICE_PORT:-8028}/health || exit 1" ] + interval: 10s + timeout: 10s + retries: 100 + volumes: + - "./data:/data" + shm_size: 20G + devices: + - /dev/kfd:/dev/kfd + - /dev/dri/:/dev/dri/ + cap_add: + - SYS_PTRACE + group_add: + - video + security_opt: + - seccomp:unconfined + - apparmor=unconfined + command: "--model ${CODEGEN_LLM_MODEL_ID} --swap-space 16 --disable-log-requests --dtype float16 --tensor-parallel-size 4 --host 0.0.0.0 --port 8011 --num-scheduler-steps 1 --distributed-executor-backend \"mp\"" + ipc: host + codegen-llm-server: + image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest} + container_name: codegen-llm-server + depends_on: + codegen-vllm-service: + condition: service_healthy + ports: + - "${CODEGEN_LLM_SERVICE_PORT:-9000}:9000" + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + LLM_ENDPOINT: ${CODEGEN_VLLM_ENDPOINT} + LLM_MODEL_ID: ${CODEGEN_LLM_MODEL_ID} + HUGGINGFACEHUB_API_TOKEN: ${CODEGEN_HUGGINGFACEHUB_API_TOKEN} + HF_TOKEN: ${CODEGEN_HUGGINGFACEHUB_API_TOKEN} + LLM_COMPONENT_NAME: "OpeaTextGenService" + restart: unless-stopped + codegen-backend-server: + image: ${REGISTRY:-opea}/codegen:${TAG:-latest} + container_name: codegen-backend-server + depends_on: + - codegen-llm-server + ports: + - "${CODEGEN_BACKEND_SERVICE_PORT:-7778}:7778" + environment: + no_proxy: ${no_proxy} + https_proxy: ${https_proxy} + http_proxy: ${http_proxy} + MEGA_SERVICE_HOST_IP: ${CODEGEN_MEGA_SERVICE_HOST_IP} + LLM_SERVICE_HOST_IP: ${HOST_IP} + LLM_SERVICE_PORT: ${CODEGEN_LLM_SERVICE_PORT} + ipc: host + restart: always + codegen-ui-server: + image: ${REGISTRY:-opea}/codegen-ui:${TAG:-latest} + container_name: codegen-ui-server + depends_on: + - codegen-backend-server + ports: + - "${CODEGEN_UI_SERVICE_PORT:-5173}:5173" + environment: + no_proxy: ${no_proxy} + https_proxy: ${https_proxy} + http_proxy: ${http_proxy} + BASIC_URL: ${CODEGEN_BACKEND_SERVICE_URL} + BACKEND_SERVICE_ENDPOINT: ${CODEGEN_BACKEND_SERVICE_URL} + ipc: host + restart: always + +networks: + default: + driver: bridge diff --git a/CodeGen/docker_compose/amd/gpu/rocm/set_env.sh b/CodeGen/docker_compose/amd/gpu/rocm/set_env.sh index 505c0d018..117f81667 100644 --- a/CodeGen/docker_compose/amd/gpu/rocm/set_env.sh +++ b/CodeGen/docker_compose/amd/gpu/rocm/set_env.sh @@ -1,16 +1,18 @@ #!/usr/bin/env bash # Copyright (C) 2024 Intel Corporation +# Copyright (c) 2024 Advanced Micro Devices, Inc. # SPDX-License-Identifier: Apache-2.0 ### The IP address or domain name of the server on which the application is running -export HOST_IP=direct-supercomputer1.powerml.co +export HOST_IP='' +export EXTERNAL_HOST_IP='' ### The port of the TGI service. 
On this port, the TGI service will accept connections
 export CODEGEN_TGI_SERVICE_PORT=8028
 
 ### A token for accessing repositories with models
-export CODEGEN_HUGGINGFACEHUB_API_TOKEN=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
+export CODEGEN_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
 
 ### Model ID
 export CODEGEN_LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
@@ -28,7 +30,7 @@ export CODEGEN_MEGA_SERVICE_HOST_IP=${HOST_IP}
 export CODEGEN_BACKEND_SERVICE_PORT=18150
 
 ### The URL of CodeGen backend service, used by the frontend service
-export CODEGEN_BACKEND_SERVICE_URL="http://${HOST_IP}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen"
+export CODEGEN_BACKEND_SERVICE_URL="http://${EXTERNAL_HOST_IP}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen"
 
 ### The endpoint of the LLM service to which requests to this service will be sent
 export CODEGEN_LLM_SERVICE_HOST_IP=${HOST_IP}
diff --git a/CodeGen/docker_compose/amd/gpu/rocm/set_env_vllm.sh b/CodeGen/docker_compose/amd/gpu/rocm/set_env_vllm.sh
new file mode 100644
index 000000000..52d69da19
--- /dev/null
+++ b/CodeGen/docker_compose/amd/gpu/rocm/set_env_vllm.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2024 Intel Corporation
+# Copyright (c) 2024 Advanced Micro Devices, Inc.
+# SPDX-License-Identifier: Apache-2.0
+
+### The IP address or domain name of the server on which the application is running
+export HOST_IP=''
+export EXTERNAL_HOST_IP=''
+
+### The port of the vLLM service. On this port, the vLLM service will accept connections
+export CODEGEN_VLLM_SERVICE_PORT=8028
+export CODEGEN_VLLM_ENDPOINT="http://${HOST_IP}:${CODEGEN_VLLM_SERVICE_PORT}"
+
+### A token for accessing repositories with models
+export CODEGEN_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
+
+### Model ID
+export CODEGEN_LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
+
+### The port of the LLM service.
On this port, the LLM service will accept connections +export CODEGEN_LLM_SERVICE_PORT=9000 + +### The IP address or domain name of the server for CodeGen MegaService +export CODEGEN_MEGA_SERVICE_HOST_IP=${HOST_IP} + +### The port for CodeGen backend service +export CODEGEN_BACKEND_SERVICE_PORT=18150 + +### The URL of CodeGen backend service, used by the frontend service +export CODEGEN_BACKEND_SERVICE_URL="http://${EXTERNAL_HOST_IP}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen" + +### The endpoint of the LLM service to which requests to this service will be sent +export CODEGEN_LLM_SERVICE_HOST_IP=${HOST_IP} + +### The CodeGen service UI port +export CODEGEN_UI_SERVICE_PORT=18151 diff --git a/CodeGen/docker_image_build/build.yaml b/CodeGen/docker_image_build/build.yaml index 529984e35..3275aa71b 100644 --- a/CodeGen/docker_image_build/build.yaml +++ b/CodeGen/docker_image_build/build.yaml @@ -29,6 +29,11 @@ services: dockerfile: comps/llms/src/text-generation/Dockerfile extends: codegen image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest} + vllm-rocm: + build: + context: GenAIComps + dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu + image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest} vllm: build: context: vllm diff --git a/CodeGen/tests/test_compose_on_rocm.sh b/CodeGen/tests/test_compose_on_rocm.sh index f2d6f0ce5..361dc613e 100644 --- a/CodeGen/tests/test_compose_on_rocm.sh +++ b/CodeGen/tests/test_compose_on_rocm.sh @@ -34,7 +34,7 @@ function build_docker_images() { service_list="codegen codegen-ui llm-textgen" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log - docker pull ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu + docker pull ghcr.io/huggingface/text-generation-inference:2.3.1-rocm docker images && sleep 1s } @@ -51,7 +51,7 @@ function start_services() { export CODEGEN_BACKEND_SERVICE_PORT=7778 export CODEGEN_BACKEND_SERVICE_URL="http://${ip_address}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen" export CODEGEN_UI_SERVICE_PORT=5173 - export host_ip=${ip_address} + export HOST_IP=${ip_address} sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env @@ -100,15 +100,15 @@ function validate_services() { function validate_microservices() { # tgi for llm service validate_services \ - "${ip_address}:8028/generate" \ + "${ip_address}:${CODEGEN_TGI_SERVICE_PORT}/generate" \ "generated_text" \ "codegen-tgi-service" \ "codegen-tgi-service" \ '{"inputs":"def print_hello_world():","parameters":{"max_new_tokens":256, "do_sample": true}}' - + sleep 10 # llm microservice validate_services \ - "${ip_address}:9000/v1/chat/completions" \ + "${ip_address}:${CODEGEN_LLM_SERVICE_PORT}/v1/chat/completions" \ "data: " \ "codegen-llm-server" \ "codegen-llm-server" \ diff --git a/CodeGen/tests/test_compose_vllm_on_rocm.sh b/CodeGen/tests/test_compose_vllm_on_rocm.sh new file mode 100644 index 000000000..bb75bdafa --- /dev/null +++ b/CodeGen/tests/test_compose_vllm_on_rocm.sh @@ -0,0 +1,181 @@ +#!/bin/bash +# Copyright (C) 2024 Intel Corporation +# Copyright (c) 2024 Advanced Micro Devices, Inc. 
+# SPDX-License-Identifier: Apache-2.0 + +set -xe +IMAGE_REPO=${IMAGE_REPO:-"opea"} +IMAGE_TAG=${IMAGE_TAG:-"latest"} +echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" +echo "TAG=IMAGE_TAG=${IMAGE_TAG}" +export REGISTRY=${IMAGE_REPO} +export TAG=${IMAGE_TAG} + +WORKPATH=$(dirname "$PWD") +LOG_PATH="$WORKPATH/tests" +ip_address=$(hostname -I | awk '{print $1}') + +function build_docker_images() { + opea_branch=${opea_branch:-"main"} + # If the opea_branch isn't main, replace the git clone branch in Dockerfile. + if [[ "${opea_branch}" != "main" ]]; then + cd $WORKPATH + OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git" + NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git" + find . -type f -name "Dockerfile*" | while read -r file; do + echo "Processing file: $file" + sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file" + done + fi + + cd $WORKPATH/docker_image_build + git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git + + echo "Build all the images with --no-cache, check docker_image_build.log for details..." + service_list="vllm-rocm llm-textgen codegen codegen-ui" + docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log + + docker images && sleep 1s +} + +function start_services() { + cd $WORKPATH/docker_compose/amd/gpu/rocm/ + + export CODEGEN_LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct" + export CODEGEN_VLLM_SERVICE_PORT=8028 + export CODEGEN_VLLM_ENDPOINT="http://${ip_address}:${CODEGEN_VLLM_SERVICE_PORT}" + export CODEGEN_LLM_SERVICE_PORT=9000 + export CODEGEN_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} + export CODEGEN_MEGA_SERVICE_HOST_IP=${ip_address} + export CODEGEN_LLM_SERVICE_HOST_IP=${ip_address} + export CODEGEN_BACKEND_SERVICE_PORT=7778 + export CODEGEN_BACKEND_SERVICE_URL="http://${ip_address}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen" + export CODEGEN_UI_SERVICE_PORT=5173 + export HOST_IP=${ip_address} + + sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env + + # Start Docker Containers + docker compose -f compose_vllm.yaml up -d > ${LOG_PATH}/start_services_with_compose.log + + n=0 + until [[ "$n" -ge 500 ]]; do + docker logs codegen-vllm-service >& "${LOG_PATH}"/codegen-vllm-service_start.log + if grep -q "Application startup complete" "${LOG_PATH}"/codegen-vllm-service_start.log; then + break + fi + sleep 20s + n=$((n+1)) + done +} + +function validate_services() { + local URL="$1" + local EXPECTED_RESULT="$2" + local SERVICE_NAME="$3" + local DOCKER_NAME="$4" + local INPUT_DATA="$5" + + local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") + if [ "$HTTP_STATUS" -eq 200 ]; then + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." + + local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log) + + if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then + echo "[ $SERVICE_NAME ] Content is as expected." + else + echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT" + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log + exit 1 + fi + else + echo "[ $SERVICE_NAME ] HTTP status is not 200. 
Received status was $HTTP_STATUS" + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log + exit 1 + fi + sleep 5s +} + +function validate_microservices() { + # vLLM for llm service + validate_services \ + "${ip_address}:${CODEGEN_VLLM_SERVICE_PORT}/v1/chat/completions" \ + "content" \ + "codegen-vllm-service" \ + "codegen-vllm-service" \ + '{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "What is Deep Learning?"}], "max_tokens": 17}' + sleep 10 + # llm microservice + validate_services \ + "${ip_address}:${CODEGEN_LLM_SERVICE_PORT}/v1/chat/completions" \ + "data: " \ + "codegen-llm-server" \ + "codegen-llm-server" \ + '{"query":"def print_hello_world():"}' + +} + +function validate_megaservice() { + # Curl the Mega Service + validate_services \ + "${ip_address}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen" \ + "print" \ + "codegen-backend-server" \ + "codegen-backend-server" \ + '{"messages": "def print_hello_world():"}' + +} + +function validate_frontend() { + cd $WORKPATH/ui/svelte + local conda_env_name="OPEA_e2e" + export PATH=${HOME}/miniconda3/bin/:$PATH + if conda info --envs | grep -q "$conda_env_name"; then + echo "$conda_env_name exist!" + else + conda create -n ${conda_env_name} python=3.12 -y + fi + source activate ${conda_env_name} + + sed -i "s/localhost/$ip_address/g" playwright.config.ts + + conda install -c conda-forge nodejs=22.6.0 -y + npm install && npm ci && npx playwright install --with-deps + node -v && npm -v && pip list + + exit_status=0 + npx playwright test || exit_status=$? + + if [ $exit_status -ne 0 ]; then + echo "[TEST INFO]: ---------frontend test failed---------" + exit $exit_status + else + echo "[TEST INFO]: ---------frontend test passed---------" + fi +} + + +function stop_docker() { + echo "OPENAI_API_KEY - ${OPENAI_API_KEY}" + cd $WORKPATH/docker_compose/amd/gpu/rocm/ + docker compose -f compose_vllm.yaml stop && docker compose -f compose_vllm.yaml rm -f +} + +function main() { + + stop_docker + if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi + start_services + + validate_microservices + validate_megaservice + validate_frontend + + stop_docker + echo y | docker system prune + cd $WORKPATH + +} + +main