Enhance CI/CD infrastructure (#593)
Signed-off-by: chensuyue <suyue.chen@intel.com>
Signed-off-by: Sun, Xuehao <xuehao.sun@intel.com>
77 .github/workflows/_example-workflow.yml vendored
@@ -6,6 +6,9 @@ permissions: read-all
 on:
   workflow_call:
     inputs:
+      node:
+        required: true
+        type: string
       example:
         required: true
         type: string
@@ -37,30 +40,43 @@ on:
         default: "latest"
         required: false
         type: string
+      GenAIComps_branch:
+        default: "main"
+        required: false
+        type: string

 jobs:
 ####################################################################################################
 # Image Build
 ####################################################################################################
   build-images:
     if: ${{ fromJSON(inputs.build) }}
-    strategy:
-      matrix:
-        node: ["docker-build-xeon", "docker-build-gaudi"]
-    runs-on: ${{ matrix.node }}
+    runs-on: "docker-build-${{ inputs.node }}"
     continue-on-error: true
     steps:
       - name: Clean Up Working Directory
-        run: |
-          sudo rm -rf ${{github.workspace}}/*
+        run: sudo rm -rf ${{github.workspace}}/*

       - name: Checkout out Repo
         uses: actions/checkout@v4

+      - name: Clone required Repo
+        run: |
+          cd ${{ github.workspace }}/${{ inputs.example }}/docker
+          docker_compose_path=${{ github.workspace }}/${{ inputs.example }}/docker/docker_build_compose.yaml
+          if [[ $(grep -c "tei-gaudi:" ${docker_compose_path}) != 0 ]]; then
+            git clone https://github.com/huggingface/tei-gaudi.git
+          fi
+          if [[ $(grep -c "vllm:" ${docker_compose_path}) != 0 ]]; then
+            git clone https://github.com/vllm-project/vllm.git
+          fi
+          git clone https://github.com/opea-project/GenAIComps.git
+          cd GenAIComps && git checkout ${{ inputs.GenAIComps_branch }} && cd ../
+
       - name: Build Image
         if: ${{ fromJSON(inputs.build) }}
         uses: opea-project/validation/actions/image-build@main
         with:
-          work_dir: ${{ github.workspace }}/${{ inputs.example }}
-          docker_compose_path: ${{ github.workspace }}/.github/workflows/docker/compose/${{ inputs.example }}-compose.yaml
+          work_dir: ${{ github.workspace }}/${{ inputs.example }}/docker
+          docker_compose_path: ${{ github.workspace }}/${{ inputs.example }}/docker/docker_build_compose.yaml
           registry: ${OPEA_IMAGE_REPO}opea
           tag: ${{ inputs.tag }}

@@ -68,17 +84,12 @@ jobs:
 # Trivy Scan
 ####################################################################################################
   image-list:
-    needs: [ build-images ]
-    if: ${{ fromJSON(inputs.scan) }}
+    needs: [build-images]
+    if: ${{ fromJSON(inputs.scan) && inputs.node == 'gaudi' }}
     runs-on: ubuntu-latest
     outputs:
       matrix: ${{ steps.scan-matrix.outputs.matrix }}
     steps:
-      - name: Harden Runner
-        uses: step-security/harden-runner@v2.8.1
-        with:
-          egress-policy: audit
-
       - name: Checkout out Repo
         uses: actions/checkout@v4

@@ -90,34 +101,32 @@ jobs:
           echo "matrix=$(cat ${compose_path} | yq -r '.[]' | jq 'keys' | jq -c '.')" >> $GITHUB_OUTPUT

   scan-images:
-    needs: [image-list]
-    if: ${{ fromJSON(inputs.scan) }}
-    runs-on: "docker-build-gaudi"
+    needs: [image-list, build-images]
+    if: ${{ fromJSON(inputs.scan) && inputs.node == 'gaudi' }}
+    runs-on: "docker-build-${{ inputs.node }}"
     strategy:
       matrix:
         image: ${{ fromJSON(needs.image-list.outputs.matrix) }}
       fail-fast: false
     steps:
-      - name: Harden Runner
-        uses: step-security/harden-runner@v2.8.1
-        with:
-          egress-policy: audit
-
       - name: Pull Image
-        run: docker pull ${OPEA_IMAGE_REPO}opea/${{ matrix.image }}:${{ inputs.tag }}
+        run: |
+          docker pull ${OPEA_IMAGE_REPO}opea/${{ matrix.image }}:${{ inputs.tag }}
+          echo "OPEA_IMAGE_REPO=${OPEA_IMAGE_REPO}" >> $GITHUB_ENV

       - name: Scan Container
         uses: opea-project/validation/actions/trivy-scan@main
         with:
-          image-ref: ${OPEA_IMAGE_REPO}opea/${{ matrix.image }}:${{ inputs.tag }}
+          image-ref: ${{ env.OPEA_IMAGE_REPO }}opea/${{ matrix.image }}:${{ inputs.tag }}
           output: ${{ inputs.example }}-${{ matrix.image }}-scan.txt

       - name: Cleanup
         if: always()
         run: docker rmi -f ${OPEA_IMAGE_REPO}opea/${{ matrix.image }}:${{ inputs.tag }}

       - uses: actions/upload-artifact@v4.3.4
         with:
-          name: ${{ inputs.example }}-scan
+          name: ${{ inputs.example }}-${{ matrix.image }}-scan
           path: ${{ inputs.example }}-${{ matrix.image }}-scan.txt
           overwrite: true

@@ -127,15 +136,11 @@ jobs:
   test-example-compose:
     needs: [build-images]
     if: ${{ fromJSON(inputs.test_compose) }}
-    strategy:
-      matrix:
-        hardware: ["xeon", "gaudi"]
-      fail-fast: false
     uses: ./.github/workflows/_run-docker-compose.yml
     with:
       tag: ${{ inputs.tag }}
       example: ${{ inputs.example }}
-      hardware: ${{ matrix.hardware }}
+      hardware: ${{ inputs.node }}
     secrets: inherit

@@ -145,14 +150,10 @@ jobs:
   test-k8s-manifest:
     needs: [build-images]
     if: ${{ fromJSON(inputs.test_k8s) }}
-    strategy:
-      matrix:
-        hardware: ["xeon", "gaudi"]
-      fail-fast: false
     uses: ./.github/workflows/_manifest-e2e.yml
     with:
       example: ${{ inputs.example }}
-      hardware: ${{ matrix.hardware }}
+      hardware: ${{ inputs.node }}
       tag: ${{ inputs.tag }}
     secrets: inherit

@@ -162,7 +163,7 @@ jobs:
 ####################################################################################################
   publish:
     needs: [image-list, build-images, scan-images, test-example-compose]
-    if: ${{ fromJSON(inputs.publish) }}
+    if: ${{ fromJSON(inputs.publish) && inputs.node == 'gaudi' }}
     strategy:
       matrix:
         image: ${{ fromJSON(needs.image-list.outputs.matrix) }}
3 .github/workflows/_run-docker-compose.yml vendored
@@ -72,6 +72,7 @@ jobs:
           sudo rm -rf ${{github.workspace}}/* || true
           docker system prune -f
           docker rmi $(docker images --filter reference="*/*/*:latest" -q) || true
+          docker rmi $(docker images --filter reference="*/*:ci" -q) || true

       - name: Checkout out Repo
         uses: actions/checkout@v4
@@ -93,7 +94,7 @@ jobs:
           test_case: ${{ matrix.test_case }}
         run: |
           cd ${{ github.workspace }}/$example/tests
-          export IMAGE_REPO=${OPEA_IMAGE_REPO}
+          if [[ "$IMAGE_REPO" == "" ]]; then export IMAGE_REPO="${OPEA_IMAGE_REPO}opea"; fi
           if [ -f ${test_case} ]; then timeout 30m bash ${test_case}; else echo "Test script {${test_case}} not found, skip test!"; fi

       - name: Clean up container
31 .github/workflows/manual-cd-workflow.yml vendored
@@ -5,9 +5,14 @@ name: Examples CD workflow on manual event
 on:
   workflow_dispatch:
     inputs:
+      nodes:
+        default: "gaudi,xeon"
+        description: "Hardware to run test"
+        required: true
+        type: string
       examples:
-        default: "AudioQnA,ChatQnA,CodeGen,CodeTrans,DocSum,FaqGen,SearchQnA,Translation"
-        description: 'List of examples to test'
+        default: "ChatQnA"
+        description: 'List of examples to test [AudioQnA,ChatQnA,CodeGen,CodeTrans,DocSum,FaqGen,SearchQnA,Translation]'
         required: true
         type: string
       tag:
@@ -41,33 +46,44 @@ on:
         required: false
         type: boolean
       publish_tags:
-        default: "latest,v0.9"
+        default: "latest,v1.0"
         description: 'Tag list apply to publish images'
         required: false
         type: string
+      GenAIComps_branch:
+        default: "main"
+        description: 'GenAIComps branch for image build'
+        required: false
+        type: string

 permissions: read-all
 jobs:
   get-test-matrix:
     runs-on: ubuntu-latest
     outputs:
-      matrix: ${{ steps.get-matrix.outputs.matrix }}
+      examples: ${{ steps.get-matrix.outputs.examples }}
+      nodes: ${{ steps.get-matrix.outputs.nodes }}
     steps:
       - name: Create Matrix
         id: get-matrix
         run: |
           examples=($(echo ${{ github.event.inputs.examples }} | tr ',' ' '))
           examples_json=$(printf '%s\n' "${examples[@]}" | sort -u | jq -R '.' | jq -sc '.')
-          echo "matrix=$examples_json" >> $GITHUB_OUTPUT
+          echo "examples=$examples_json" >> $GITHUB_OUTPUT
+          nodes=($(echo ${{ github.event.inputs.nodes }} | tr ',' ' '))
+          nodes_json=$(printf '%s\n' "${nodes[@]}" | sort -u | jq -R '.' | jq -sc '.')
+          echo "nodes=$nodes_json" >> $GITHUB_OUTPUT

   run-examples:
     needs: [get-test-matrix]
     strategy:
       matrix:
-        example: ${{ fromJson(needs.get-test-matrix.outputs.matrix) }}
+        example: ${{ fromJson(needs.get-test-matrix.outputs.examples) }}
+        node: ${{ fromJson(needs.get-test-matrix.outputs.nodes) }}
       fail-fast: false
     uses: ./.github/workflows/_example-workflow.yml
     with:
+      node: ${{ matrix.node }}
       example: ${{ matrix.example }}
       tag: ${{ inputs.tag }}
       build: ${{ fromJSON(inputs.build) }}
@@ -75,5 +91,6 @@ jobs:
       test_compose: ${{ fromJSON(inputs.test_compose) }}
       test_k8s: ${{ fromJSON(inputs.test_k8s) }}
       publish: ${{ fromJSON(inputs.publish) }}
-      publish_tags: ${{ fromJSON(inputs.publish_tags) }}
+      publish_tags: ${{ inputs.publish_tags }}
+      GenAIComps_branch: ${{ inputs.GenAIComps_branch }}
     secrets: inherit
21 .github/workflows/pr-docker-compose-e2e.yml vendored
@@ -4,7 +4,7 @@
 name: E2E test with docker compose

 on:
-  pull_request_target:
+  pull_request:
     branches: [main]
     types: [opened, reopened, ready_for_review, synchronize] # added `ready_for_review` since draft is skipped
     paths:
@@ -21,29 +21,20 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  job1:
+  get-test-matrix:
     uses: ./.github/workflows/_get-test-matrix.yml
     with:
       diff_excluded_files: '.github|README.md|*.txt|deprecate|kubernetes|manifest|gmc|assets'

-  mega-image-build:
-    needs: job1
-    strategy:
-      matrix: ${{ fromJSON(needs.job1.outputs.run_matrix) }}
-    uses: ./.github/workflows/_image-build.yml
-    with:
-      image_tag: ${{ github.event.pull_request.head.sha }}
-      mega_service: "${{ matrix.example }}"
-      runner_label: "docker-build-${{ matrix.hardware }}"
-
   example-test:
-    needs: [job1, mega-image-build]
+    needs: [get-test-matrix]
     strategy:
-      matrix: ${{ fromJSON(needs.job1.outputs.run_matrix) }}
+      matrix: ${{ fromJSON(needs.get-test-matrix.outputs.run_matrix) }}
       fail-fast: false
     uses: ./.github/workflows/_run-docker-compose.yml
     with:
-      tag: ${{ needs.mega-image-build.outputs.image_tag }}
+      registry: "opea"
+      tag: "ci"
       example: ${{ matrix.example }}
       hardware: ${{ matrix.hardware }}
     secrets: inherit
54 AudioQnA/docker/docker_build_compose.yaml Normal file
@@ -0,0 +1,54 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+services:
+  audioqna:
+    build:
+      args:
+        http_proxy: ${http_proxy}
+        https_proxy: ${https_proxy}
+        no_proxy: ${no_proxy}
+      dockerfile: ./Dockerfile
+    image: ${REGISTRY:-opea}/audioqna:${TAG:-latest}
+  whisper-gaudi:
+    build:
+      context: GenAIComps
+      dockerfile: comps/asr/whisper/Dockerfile_hpu
+    extends: audioqna
+    image: ${REGISTRY:-opea}/whisper-gaudi:${TAG:-latest}
+  whisper:
+    build:
+      context: GenAIComps
+      dockerfile: comps/asr/whisper/Dockerfile
+    extends: audioqna
+    image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
+  asr:
+    build:
+      context: GenAIComps
+      dockerfile: comps/asr/Dockerfile
+    extends: audioqna
+    image: ${REGISTRY:-opea}/asr:${TAG:-latest}
+  llm-tgi:
+    build:
+      context: GenAIComps
+      dockerfile: comps/llms/text-generation/tgi/Dockerfile
+    extends: audioqna
+    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
+  speecht5-gaudi:
+    build:
+      context: GenAIComps
+      dockerfile: comps/tts/speecht5/Dockerfile_hpu
+    extends: audioqna
+    image: ${REGISTRY:-opea}/speecht5-gaudi:${TAG:-latest}
+  speecht5:
+    build:
+      context: GenAIComps
+      dockerfile: comps/tts/speecht5/Dockerfile
+    extends: audioqna
+    image: ${REGISTRY:-opea}/speecht5:${TAG:-latest}
+  tts:
+    build:
+      context: GenAIComps
+      dockerfile: comps/tts/Dockerfile
+    extends: audioqna
+    image: ${REGISTRY:-opea}/tts:${TAG:-latest}
@@ -1,12 +1,9 @@
-
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-version: "3.8"
-
 services:
   whisper-service:
-    image: opea/whisper-gaudi:latest
+    image: ${REGISTRY:-opea}/whisper-gaudi:${TAG:-latest}
     container_name: whisper-service
     ports:
       - "7066:7066"
@@ -22,7 +19,7 @@ services:
       - SYS_NICE
     restart: unless-stopped
   asr:
-    image: opea/asr:latest
+    image: ${REGISTRY:-opea}/asr:${TAG:-latest}
     container_name: asr-service
     ports:
       - "3001:9099"
@@ -30,7 +27,7 @@ services:
     environment:
       ASR_ENDPOINT: ${ASR_ENDPOINT}
   speecht5-service:
-    image: opea/speecht5-gaudi:latest
+    image: ${REGISTRY:-opea}/speecht5-gaudi:${TAG:-latest}
     container_name: speecht5-service
     ports:
       - "7055:7055"
@@ -46,7 +43,7 @@ services:
       - SYS_NICE
     restart: unless-stopped
   tts:
-    image: opea/tts:latest
+    image: ${REGISTRY:-opea}/tts:${TAG:-latest}
     container_name: tts-service
     ports:
       - "3002:9088"
@@ -75,7 +72,7 @@ services:
     ipc: host
     command: --model-id ${LLM_MODEL_ID} --max-input-length 1024 --max-total-tokens 2048
   llm:
-    image: opea/llm-tgi:latest
+    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
     container_name: llm-tgi-gaudi-server
     depends_on:
       - tgi-service
@@ -90,7 +87,7 @@ services:
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   audioqna-gaudi-backend-server:
-    image: opea/audioqna:latest
+    image: ${REGISTRY:-opea}/audioqna:${TAG:-latest}
     container_name: audioqna-gaudi-backend-server
     depends_on:
       - asr
@@ -1,12 +1,9 @@
-
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-version: "3.8"
-
 services:
   whisper-service:
-    image: opea/whisper:latest
+    image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
     container_name: whisper-service
     ports:
       - "7066:7066"
@@ -17,7 +14,7 @@ services:
       https_proxy: ${https_proxy}
     restart: unless-stopped
   asr:
-    image: opea/asr:latest
+    image: ${REGISTRY:-opea}/asr:${TAG:-latest}
     container_name: asr-service
     ports:
       - "3001:9099"
@@ -25,7 +22,7 @@ services:
     environment:
       ASR_ENDPOINT: ${ASR_ENDPOINT}
   speecht5-service:
-    image: opea/speecht5:latest
+    image: ${REGISTRY:-opea}/speecht5:${TAG:-latest}
     container_name: speecht5-service
     ports:
       - "7055:7055"
@@ -36,7 +33,7 @@ services:
       https_proxy: ${https_proxy}
     restart: unless-stopped
   tts:
-    image: opea/tts:latest
+    image: ${REGISTRY:-opea}/tts:${TAG:-latest}
     container_name: tts-service
     ports:
       - "3002:9088"
@@ -58,7 +55,7 @@ services:
       HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     command: --model-id ${LLM_MODEL_ID}
   llm:
-    image: opea/llm-tgi:latest
+    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
     container_name: llm-tgi-server
     depends_on:
       - tgi-service
@@ -73,7 +70,7 @@ services:
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   audioqna-xeon-backend-server:
-    image: opea/audioqna:latest
+    image: ${REGISTRY:-opea}/audioqna:${TAG:-latest}
     container_name: audioqna-xeon-backend-server
     depends_on:
       - asr
@@ -3,34 +3,26 @@
 # SPDX-License-Identifier: Apache-2.0

 set -e
-echo "IMAGE_REPO=${IMAGE_REPO}"
+IMAGE_REPO=${IMAGE_REPO:-"opea"}
+IMAGE_TAG=${IMAGE_TAG:-"latest"}
+echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
+echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+export REGISTRY=${IMAGE_REPO}
+export TAG=${IMAGE_TAG}

 WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
 ip_address=$(hostname -I | awk '{print $1}')

 function build_docker_images() {
-    cd $WORKPATH
+    cd $WORKPATH/docker
     git clone https://github.com/opea-project/GenAIComps.git
-    cd GenAIComps
-
-    docker build -t opea/whisper-gaudi:latest -f comps/asr/whisper/Dockerfile_hpu .
-    docker build -t opea/asr:latest -f comps/asr/Dockerfile .
-    docker build -t opea/llm-tgi:latest -f comps/llms/text-generation/tgi/Dockerfile .
-    docker build -t opea/speecht5-gaudi:latest -f comps/tts/speecht5/Dockerfile_hpu .
-    docker build -t opea/tts:latest -f comps/tts/Dockerfile .
+    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
+    service_list="audioqna whisper-gaudi asr llm-tgi speecht5-gaudi tts"
+    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

     docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1

-    cd ..
-
-    cd $WORKPATH/docker
-    docker build --no-cache -t opea/audioqna:latest -f Dockerfile .
-
     # cd $WORKPATH/docker/ui
     # docker build --no-cache -t opea/audioqna-ui:latest -f docker/Dockerfile .

     docker images
 }
@@ -55,16 +47,6 @@ function start_services() {

     # sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

-    if [[ "$IMAGE_REPO" != "" ]]; then
-        # Replace the container name with a test-specific name
-        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
-        sed -i "s#image: opea/audioqna:latest#image: opea/audioqna:${IMAGE_TAG}#g" compose.yaml
-        sed -i "s#image: opea/audioqna-ui:latest#image: opea/audioqna-ui:${IMAGE_TAG}#g" compose.yaml
-        sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
-        echo "cat compose.yaml"
-        cat compose.yaml
-    fi
-
     # Start Docker Containers
     docker compose up -d
     n=0
@@ -76,6 +58,16 @@ function start_services() {
         sleep 1s
         n=$((n+1))
     done

+    n=0
+    until [[ "$n" -ge 200 ]]; do
+        docker logs whisper-service > $LOG_PATH/whisper_service_start.log
+        if grep -q "Uvicorn server setup on port" $LOG_PATH/whisper_service_start.log; then
+            break
+        fi
+        sleep 1s
+        n=$((n+1))
+    done
 }
@@ -131,7 +123,7 @@ function stop_docker() {
 function main() {

     stop_docker
-    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
     start_services

     # validate_microservices
@@ -3,31 +3,26 @@
 # SPDX-License-Identifier: Apache-2.0

 set -e
-echo "IMAGE_REPO=${IMAGE_REPO}"
+IMAGE_REPO=${IMAGE_REPO:-"opea"}
+IMAGE_TAG=${IMAGE_TAG:-"latest"}
+echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
+echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+export REGISTRY=${IMAGE_REPO}
+export TAG=${IMAGE_TAG}

 WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
 ip_address=$(hostname -I | awk '{print $1}')

 function build_docker_images() {
-    cd $WORKPATH
+    cd $WORKPATH/docker
     git clone https://github.com/opea-project/GenAIComps.git
-    cd GenAIComps
-
-    docker build -t opea/whisper:latest -f comps/asr/whisper/Dockerfile .
-    docker build -t opea/asr:latest -f comps/asr/Dockerfile .
-    docker build -t opea/llm-tgi:latest -f comps/llms/text-generation/tgi/Dockerfile .
-    docker build -t opea/speecht5:latest -f comps/tts/speecht5/Dockerfile .
-    docker build -t opea/tts:latest -f comps/tts/Dockerfile .
+    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
+    service_list="audioqna whisper asr llm-tgi speecht5 tts"
+    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

     docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1

-    cd $WORKPATH/docker
-    docker build --no-cache -t opea/audioqna:latest -f Dockerfile .
-
     # cd $WORKPATH/docker/ui
     # docker build --no-cache -t opea/audioqna-ui:latest -f docker/Dockerfile .

     docker images
 }
@@ -51,16 +46,6 @@ function start_services() {

     # sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

-    if [[ "$IMAGE_REPO" != "" ]]; then
-        # Replace the container name with a test-specific name
-        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
-        sed -i "s#image: opea/audioqna:latest#image: opea/audioqna:${IMAGE_TAG}#g" compose.yaml
-        sed -i "s#image: opea/audioqna-ui:latest#image: opea/audioqna-ui:${IMAGE_TAG}#g" compose.yaml
-        sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
-        echo "cat compose.yaml"
-        cat compose.yaml
-    fi
-
     # Start Docker Containers
     docker compose up -d
     n=0
@@ -128,7 +113,7 @@ function stop_docker() {
 function main() {

     stop_docker
-    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
     start_services

     validate_megaservice
@@ -1,8 +1,6 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-version: "3.8"
-
 services:
   redis-vector-db:
     image: redis/redis-stack:7.2.0-v9
@@ -11,7 +9,7 @@ services:
       - "6379:6379"
      - "8001:8001"
   dataprep-redis-service:
-    image: opea/dataprep-redis:latest
+    image: ${REGISTRY:-opea}/dataprep-redis:${TAG:-latest}
     container_name: dataprep-redis-server
     depends_on:
       - redis-vector-db
@@ -39,7 +37,7 @@ services:
       https_proxy: ${https_proxy}
     command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate
   embedding:
-    image: opea/embedding-tei:latest
+    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
     container_name: embedding-tei-server
     depends_on:
       - tei-embedding-service
@@ -53,7 +51,7 @@ services:
       TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
     restart: unless-stopped
   retriever:
-    image: opea/retriever-redis:latest
+    image: ${REGISTRY:-opea}/retriever-redis:${TAG:-latest}
     container_name: retriever-redis-server
     depends_on:
       - redis-vector-db
@@ -85,7 +83,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     command: --model-id ${RERANK_MODEL_ID} --auto-truncate
   reranking:
-    image: opea/reranking-tei:latest
+    image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
     container_name: reranking-tei-aipc-server
     depends_on:
       - tei-reranking-service
@@ -102,7 +100,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     restart: unless-stopped
   llm:
-    image: opea/llm-ollama
+    image: ${REGISTRY:-opea}/llm-ollama
     container_name: llm-ollama
     ports:
       - "9000:9000"
@@ -118,7 +116,7 @@ services:
       OLLAMA_ENDPOINT: ${OLLAMA_ENDPOINT}
       OLLAMA_MODEL: ${OLLAMA_MODEL}
   chaqna-aipc-backend-server:
-    image: opea/chatqna:latest
+    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
     container_name: chatqna-aipc-backend-server
     depends_on:
       - redis-vector-db
@@ -142,7 +140,7 @@ services:
     ipc: host
     restart: always
   chaqna-aipc-ui-server:
-    image: opea/chatqna-ui:latest
+    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
     container_name: chatqna-aipc-ui-server
     depends_on:
       - chaqna-aipc-backend-server
108 ChatQnA/docker/docker_build_compose.yaml Normal file
@@ -0,0 +1,108 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+services:
+  chatqna:
+    build:
+      args:
+        http_proxy: ${http_proxy}
+        https_proxy: ${https_proxy}
+        no_proxy: ${no_proxy}
+      dockerfile: ./Dockerfile
+    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
+  chatqna-ui:
+    build:
+      context: ui
+      dockerfile: ./docker/Dockerfile
+    extends: chatqna
+    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
+  chatqna-conversation-ui:
+    build:
+      context: ui
+      dockerfile: ./docker/Dockerfile.react
+    extends: chatqna
+    image: ${REGISTRY:-opea}/chatqna-conversation-ui:${TAG:-latest}
+  embedding-tei:
+    build:
+      context: GenAIComps
+      dockerfile: comps/embeddings/langchain/docker/Dockerfile
+    extends: chatqna
+    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
+  retriever-redis:
+    build:
+      context: GenAIComps
+      dockerfile: comps/retrievers/langchain/redis/docker/Dockerfile
+    extends: chatqna
+    image: ${REGISTRY:-opea}/retriever-redis:${TAG:-latest}
+  retriever-qdrant:
+    build:
+      context: GenAIComps
+      dockerfile: comps/retrievers/haystack/qdrant/docker/Dockerfile
+    extends: chatqna
+    image: ${REGISTRY:-opea}/retriever-qdrant:${TAG:-latest}
+  reranking-tei:
+    build:
+      context: GenAIComps
+      dockerfile: comps/reranks/tei/docker/Dockerfile
+    extends: chatqna
+    image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
+  llm-tgi:
+    build:
+      context: GenAIComps
+      dockerfile: comps/llms/text-generation/tgi/Dockerfile
+    extends: chatqna
+    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
+  llm-ollama:
+    build:
+      context: GenAIComps
+      dockerfile: comps/llms/text-generation/ollama/Dockerfile
+    extends: chatqna
+    image: ${REGISTRY:-opea}/llm-ollama:${TAG:-latest}
+  llm-vllm:
+    build:
+      context: GenAIComps
+      dockerfile: comps/llms/text-generation/vllm/docker/Dockerfile.microservice
+    extends: chatqna
+    image: ${REGISTRY:-opea}/llm-vllm:${TAG:-latest}
+  llm-vllm-hpu:
+    build:
+      context: GenAIComps
+      dockerfile: comps/llms/text-generation/vllm/docker/Dockerfile.hpu
+    extends: chatqna
+    image: ${REGISTRY:-opea}/llm-vllm-hpu:${TAG:-latest}
+  llm-vllm-ray:
+    build:
+      context: GenAIComps
+      dockerfile: comps/llms/text-generation/vllm-ray/docker/Dockerfile.microservice
+    extends: chatqna
+    image: ${REGISTRY:-opea}/llm-vllm-ray:${TAG:-latest}
+  llm-vllm-ray-hpu:
+    build:
+      context: GenAIComps
+      dockerfile: comps/llms/text-generation/vllm-ray/docker/Dockerfile.vllmray
+    extends: chatqna
+    image: ${REGISTRY:-opea}/llm-vllm-ray-hpu:${TAG:-latest}
+  dataprep-redis:
+    build:
+      context: GenAIComps
+      dockerfile: comps/dataprep/redis/langchain/docker/Dockerfile
+    extends: chatqna
+    image: ${REGISTRY:-opea}/dataprep-redis:${TAG:-latest}
+  dataprep-qdrant:
+    build:
+      context: GenAIComps
+      dockerfile: comps/dataprep/qdrant/docker/Dockerfile
+    extends: chatqna
+    image: ${REGISTRY:-opea}/dataprep-qdrant:${TAG:-latest}
+  tei-gaudi:
+    build:
+      context: tei-gaudi
+      dockerfile: Dockerfile-hpu
+    extends: chatqna
+    image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest}
+  vllm:
+    build:
+      context: vllm
+      dockerfile: Dockerfile.cpu
+    extends: chatqna
+    image: ${REGISTRY:-opea}/vllm:${TAG:-latest}
@@ -1,9 +1,6 @@
-
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-version: "3.8"
-
 services:
   redis-vector-db:
     image: redis/redis-stack:7.2.0-v9
@@ -12,7 +9,7 @@ services:
       - "6379:6379"
       - "8001:8001"
   dataprep-redis-service:
-    image: opea/dataprep-redis:latest
+    image: ${REGISTRY:-opea}/dataprep-redis:${TAG:-latest}
     container_name: dataprep-redis-server
     depends_on:
       - redis-vector-db
@@ -28,7 +25,7 @@ services:
       TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
   tei-embedding-service:
-    image: opea/tei-gaudi:latest
+    image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest}
     container_name: tei-embedding-gaudi-server
     ports:
       - "8090:80"
@@ -49,7 +46,7 @@ services:
       ENABLE_EXPERIMENTAL_FLAGS: true
     command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate
   embedding:
-    image: opea/embedding-tei:latest
+    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
     container_name: embedding-tei-server
     depends_on:
       - tei-embedding-service
@@ -63,7 +60,7 @@ services:
       TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
     restart: unless-stopped
   retriever:
-    image: opea/retriever-redis:latest
+    image: ${REGISTRY:-opea}/retriever-redis:${TAG:-latest}
     container_name: retriever-redis-server
     depends_on:
       - redis-vector-db
@@ -94,7 +91,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     command: --model-id ${RERANK_MODEL_ID} --auto-truncate
   reranking:
-    image: opea/reranking-tei:latest
+    image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
     container_name: reranking-tei-gaudi-server
     depends_on:
       - tei-reranking-service
@@ -132,7 +129,7 @@ services:
     ipc: host
     command: --model-id ${LLM_MODEL_ID} --max-input-length 1024 --max-total-tokens 2048
   llm:
-    image: opea/llm-tgi:latest
+    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
     container_name: llm-tgi-gaudi-server
     depends_on:
       - tgi-service
@@ -149,7 +146,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     restart: unless-stopped
   chaqna-gaudi-backend-server:
-    image: opea/chatqna:latest
+    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
     container_name: chatqna-gaudi-backend-server
     depends_on:
       - redis-vector-db
@@ -174,7 +171,7 @@ services:
     ipc: host
     restart: always
   chaqna-gaudi-ui-server:
-    image: opea/chatqna-ui:latest
+    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
     container_name: chatqna-gaudi-ui-server
     depends_on:
       - chaqna-gaudi-backend-server
@@ -1,9 +1,6 @@
-
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-version: "3.8"
-
 services:
   redis-vector-db:
     image: redis/redis-stack:7.2.0-v9
@@ -12,7 +9,7 @@ services:
       - "6379:6379"
       - "8001:8001"
   dataprep-redis-service:
-    image: opea/dataprep-redis:latest
+    image: ${REGISTRY:-opea}/dataprep-redis:${TAG:-latest}
     container_name: dataprep-redis-server
     depends_on:
       - redis-vector-db
@@ -49,7 +46,7 @@ services:
     ipc: host
     command: --model-id ${GURADRAILS_MODEL_ID} --max-input-length 1024 --max-total-tokens 2048
   guardrails:
-    image: opea/guardrails-tgi:latest
+    image: ${REGISTRY:-opea}/guardrails-tgi:${TAG:-latest}
     container_name: guardrails-tgi-gaudi-server
     ports:
       - "9090:9090"
@@ -63,7 +60,7 @@ services:
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   tei-embedding-service:
-    image: opea/tei-gaudi:latest
+    image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest}
     container_name: tei-embedding-gaudi-server
     ports:
       - "8090:80"
@@ -82,7 +79,7 @@ services:
       MAX_WARMUP_SEQUENCE_LENGTH: 512
     command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate
   embedding:
-    image: opea/embedding-tei:latest
+    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
     container_name: embedding-tei-server
     depends_on:
       - tei-embedding-service
@@ -96,7 +93,7 @@ services:
       TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
     restart: unless-stopped
   retriever:
-    image: opea/retriever-redis:latest
+    image: ${REGISTRY:-opea}/retriever-redis:${TAG:-latest}
     container_name: retriever-redis-server
     depends_on:
       - redis-vector-db
@@ -127,7 +124,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     command: --model-id ${RERANK_MODEL_ID} --auto-truncate
   reranking:
-    image: opea/reranking-tei:latest
+    image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
     container_name: reranking-tei-gaudi-server
     depends_on:
       - tei-reranking-service
@@ -165,7 +162,7 @@ services:
     ipc: host
     command: --model-id ${LLM_MODEL_ID} --max-input-length 1024 --max-total-tokens 2048
   llm:
-    image: opea/llm-tgi:latest
+    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
     container_name: llm-tgi-gaudi-server
     depends_on:
       - tgi-service
@@ -182,7 +179,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     restart: unless-stopped
   chaqna-gaudi-backend-server:
-    image: opea/chatqna-guardrails:latest
+    image: ${REGISTRY:-opea}/chatqna-guardrails:${TAG:-latest}
     container_name: chatqna-gaudi-guardrails-server
     depends_on:
       - redis-vector-db
@@ -210,7 +207,7 @@ services:
     ipc: host
     restart: always
   chaqna-gaudi-ui-server:
-    image: opea/chatqna-ui:latest
+    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
     container_name: chatqna-gaudi-ui-server
     depends_on:
       - chaqna-gaudi-backend-server
@@ -1,9 +1,6 @@
-
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-version: "3.8"
-
 services:
   redis-vector-db:
     image: redis/redis-stack:7.2.0-v9
@@ -12,7 +9,7 @@ services:
       - "6379:6379"
       - "8001:8001"
   dataprep-redis-service:
-    image: opea/dataprep-redis:latest
+    image: ${REGISTRY:-opea}/dataprep-redis:${TAG:-latest}
     container_name: dataprep-redis-server
     depends_on:
       - redis-vector-db
@@ -28,7 +25,7 @@ services:
       TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
   tei-embedding-service:
-    image: opea/tei-gaudi:latest
+    image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest}
     container_name: tei-embedding-gaudi-server
     ports:
       - "8090:80"
@@ -47,7 +44,7 @@ services:
       MAX_WARMUP_SEQUENCE_LENGTH: 512
     command: --model-id ${EMBEDDING_MODEL_ID}
   embedding:
-    image: opea/embedding-tei:latest
+    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
     container_name: embedding-tei-server
     depends_on:
       - tei-embedding-service
@@ -61,7 +58,7 @@ services:
       TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
     restart: unless-stopped
   retriever:
-    image: opea/retriever-redis:latest
+    image: ${REGISTRY:-opea}/retriever-redis:${TAG:-latest}
     container_name: retriever-redis-server
     depends_on:
       - redis-vector-db
@@ -92,7 +89,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     command: --model-id ${RERANK_MODEL_ID} --auto-truncate
   reranking:
-    image: opea/reranking-tei:latest
+    image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
     container_name: reranking-tei-gaudi-server
     depends_on:
       - tei-reranking-service
@@ -109,7 +106,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     restart: unless-stopped
   vllm-service:
-    image: opea/llm-vllm-hpu:latest
+    image: ${REGISTRY:-opea}/llm-vllm-hpu:${TAG:-latest}
     container_name: vllm-gaudi-server
     ports:
       - "8007:80"
@@ -129,7 +126,7 @@ services:
     ipc: host
     command: /bin/bash -c "export VLLM_CPU_KVCACHE_SPACE=40 && python3 -m vllm.entrypoints.openai.api_server --enforce-eager --model $LLM_MODEL_ID --tensor-parallel-size 1 --host 0.0.0.0 --port 80 --block-size 128 --max-num-seqs 256 --max-seq_len-to-capture 2048"
   llm:
-    image: opea/llm-vllm:latest
+    image: ${REGISTRY:-opea}/llm-vllm:${TAG:-latest}
     container_name: llm-vllm-gaudi-server
     depends_on:
       - vllm-service
@@ -145,7 +142,7 @@ services:
       LLM_MODEL: ${LLM_MODEL_ID}
     restart: unless-stopped
   chaqna-gaudi-backend-server:
-    image: opea/chatqna:latest
+    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
     container_name: chatqna-gaudi-backend-server
     depends_on:
       - redis-vector-db
@@ -171,7 +168,7 @@ services:
     ipc: host
     restart: always
   chaqna-gaudi-ui-server:
-    image: opea/chatqna-ui:latest
+    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
     container_name: chatqna-gaudi-ui-server
     depends_on:
       - chaqna-gaudi-backend-server
@@ -1,9 +1,6 @@
-
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-version: "3.8"
-
 services:
   redis-vector-db:
     image: redis/redis-stack:7.2.0-v9
@@ -12,7 +9,7 @@ services:
       - "6379:6379"
       - "8001:8001"
   dataprep-redis-service:
-    image: opea/dataprep-redis:latest
+    image: ${REGISTRY:-opea}/dataprep-redis:${TAG:-latest}
     container_name: dataprep-redis-server
     depends_on:
       - redis-vector-db
@@ -28,7 +25,7 @@ services:
       TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
   tei-embedding-service:
-    image: opea/tei-gaudi:latest
+    image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest}
     container_name: tei-embedding-gaudi-server
     ports:
       - "8090:80"
@@ -47,7 +44,7 @@ services:
       MAX_WARMUP_SEQUENCE_LENGTH: 512
     command: --model-id ${EMBEDDING_MODEL_ID}
   embedding:
-    image: opea/embedding-tei:latest
+    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
     container_name: embedding-tei-server
     depends_on:
       - tei-embedding-service
@@ -61,7 +58,7 @@ services:
       TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
     restart: unless-stopped
   retriever:
-    image: opea/retriever-redis:latest
+    image: ${REGISTRY:-opea}/retriever-redis:${TAG:-latest}
     container_name: retriever-redis-server
     depends_on:
       - redis-vector-db
@@ -92,7 +89,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     command: --model-id ${RERANK_MODEL_ID} --auto-truncate
   reranking:
-    image: opea/reranking-tei:latest
+    image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
     container_name: reranking-tei-gaudi-server
     depends_on:
       - tei-reranking-service
@@ -109,7 +106,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     restart: unless-stopped
   vllm-ray-service:
-    image: opea/llm-vllm-ray-hpu:latest
+    image: ${REGISTRY:-opea}/llm-vllm-ray-hpu:${TAG:-latest}
     container_name: vllm-ray-gaudi-server
     ports:
       - "8006:8000"
@@ -129,7 +126,7 @@ services:
     ipc: host
     command: /bin/bash -c "ray start --head && python vllm_ray_openai.py --port_number 8000 --model_id_or_path $LLM_MODEL_ID --tensor_parallel_size 2 --enforce_eager True"
   llm:
-    image: opea/llm-vllm-ray:latest
+    image: ${REGISTRY:-opea}/llm-vllm-ray:${TAG:-latest}
     container_name: llm-vllm-ray-gaudi-server
     depends_on:
       - vllm-ray-service
@@ -145,7 +142,7 @@ services:
       LLM_MODEL: ${LLM_MODEL_ID}
     restart: unless-stopped
   chaqna-gaudi-backend-server:
-    image: opea/chatqna:latest
+    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
     container_name: chatqna-gaudi-backend-server
     depends_on:
       - redis-vector-db
@@ -171,7 +168,7 @@ services:
     ipc: host
     restart: always
   chaqna-gaudi-ui-server:
-    image: opea/chatqna-ui:latest
+    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
     container_name: chatqna-gaudi-ui-server
     depends_on:
       - chaqna-gaudi-backend-server
@@ -1,9 +1,6 @@
-
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-version: "3.8"
-
 services:
   redis-vector-db:
     image: redis/redis-stack:7.2.0-v9
@@ -12,7 +9,7 @@ services:
       - "6379:6379"
       - "8001:8001"
   dataprep-redis-service:
-    image: opea/dataprep-redis:latest
+    image: ${REGISTRY:-opea}/dataprep-redis:${TAG:-latest}
     container_name: dataprep-redis-server
     depends_on:
       - redis-vector-db
@@ -50,7 +47,7 @@ services:
               count: 1
               capabilities: [gpu]
   embedding:
-    image: opea/embedding-tei:latest
+    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
     container_name: embedding-tei-server
     depends_on:
       - tei-embedding-service
@@ -64,7 +61,7 @@ services:
       TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
     restart: unless-stopped
   retriever:
-    image: opea/retriever-redis:latest
+    image: ${REGISTRY:-opea}/retriever-redis:${TAG:-latest}
     container_name: retriever-redis-server
     depends_on:
       - redis-vector-db
@@ -102,7 +99,7 @@ services:
               count: 1
               capabilities: [gpu]
   reranking:
-    image: opea/reranking-tei:latest
+    image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
     container_name: reranking-tei-server
     depends_on:
       - tei-reranking-service
@@ -142,7 +139,7 @@ services:
               count: 1
               capabilities: [gpu]
   llm:
-    image: opea/llm-tgi:latest
+    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
     container_name: llm-tgi-server
     depends_on:
       - tgi-service
@@ -159,7 +156,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     restart: unless-stopped
   chaqna-backend-server:
-    image: opea/chatqna:latest
+    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
     container_name: chatqna-backend-server
     depends_on:
       - redis-vector-db
@@ -184,7 +181,7 @@ services:
     ipc: host
     restart: always
   chaqna-ui-server:
-    image: opea/chatqna-ui:latest
+    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
     container_name: chatqna-ui-server
     depends_on:
       - chaqna-backend-server
@@ -1,9 +1,6 @@
-
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-version: "3.8"
-
 services:
   redis-vector-db:
     image: redis/redis-stack:7.2.0-v9
@@ -12,7 +9,7 @@ services:
       - "6379:6379"
       - "8001:8001"
   dataprep-redis-service:
-    image: opea/dataprep-redis:latest
+    image: ${REGISTRY:-opea}/dataprep-redis:${TAG:-latest}
     container_name: dataprep-redis-server
     depends_on:
       - redis-vector-db
@@ -42,7 +39,7 @@ services:
       https_proxy: ${https_proxy}
     command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate
   embedding:
-    image: opea/embedding-tei:latest
+    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
     container_name: embedding-tei-server
     depends_on:
       - tei-embedding-service
@@ -56,7 +53,7 @@ services:
       TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
     restart: unless-stopped
   retriever:
-    image: opea/retriever-redis:latest
+    image: ${REGISTRY:-opea}/retriever-redis:${TAG:-latest}
     container_name: retriever-redis-server
     depends_on:
       - redis-vector-db
@@ -88,7 +85,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     command: --model-id ${RERANK_MODEL_ID} --auto-truncate
   reranking:
-    image: opea/reranking-tei:latest
+    image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
     container_name: reranking-tei-xeon-server
     depends_on:
       - tei-reranking-service
@@ -121,7 +118,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     command: --model-id ${LLM_MODEL_ID}
   llm:
-    image: opea/llm-tgi:latest
+    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
     container_name: llm-tgi-server
     depends_on:
       - tgi-service
@@ -138,7 +135,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     restart: unless-stopped
   chaqna-xeon-backend-server:
-    image: opea/chatqna:latest
+    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
     container_name: chatqna-xeon-backend-server
     depends_on:
       - redis-vector-db
@@ -164,7 +161,7 @@ services:
     ipc: host
     restart: always
   chaqna-xeon-ui-server:
-    image: opea/chatqna-ui:latest
+    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
     container_name: chatqna-xeon-ui-server
     depends_on:
       - chaqna-xeon-backend-server
@@ -1,9 +1,6 @@
-
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-version: "3.8"
-
 services:
   qdrant-vector-db:
     image: qdrant/qdrant
@@ -12,7 +9,7 @@ services:
       - "6333:6333"
       - "6334:6334"
   dataprep-qdrant-service:
-    image: opea/dataprep-qdrant:latest
+    image: ${REGISTRY:-opea}/dataprep-qdrant:${TAG:-latest}
     container_name: dataprep-qdrant-server
     depends_on:
       - qdrant-vector-db
@@ -42,7 +39,7 @@ services:
       https_proxy: ${https_proxy}
     command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate
   embedding:
-    image: opea/embedding-tei:latest
+    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
     container_name: embedding-tei-server
     depends_on:
       - tei-embedding-service
@@ -56,7 +53,7 @@ services:
       TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
     restart: unless-stopped
   retriever:
-    image: opea/retriever-qdrant:latest
+    image: ${REGISTRY:-opea}/retriever-qdrant:${TAG:-latest}
     container_name: retriever-qdrant-server
     depends_on:
       - qdrant-vector-db
@@ -88,7 +85,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     command: --model-id ${RERANK_MODEL_ID} --auto-truncate
   reranking:
-    image: opea/reranking-tei:latest
+    image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
     container_name: reranking-tei-xeon-server
     depends_on:
       - tei-reranking-service
@@ -121,7 +118,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     command: --model-id ${LLM_MODEL_ID}
   llm:
-    image: opea/llm-tgi:latest
+    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
     container_name: llm-tgi-server
     depends_on:
       - tgi-service
@@ -138,7 +135,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     restart: unless-stopped
   chaqna-xeon-backend-server:
-    image: opea/chatqna:latest
+    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
     container_name: chatqna-xeon-backend-server
     depends_on:
       - qdrant-vector-db
@@ -167,7 +164,7 @@ services:
     ipc: host
     restart: always
   chaqna-xeon-ui-server:
-    image: opea/chatqna-ui:latest
+    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
     container_name: chatqna-xeon-ui-server
     depends_on:
       - chaqna-xeon-backend-server
@@ -1,9 +1,6 @@
-
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-version: "3.8"
-
 services:
   redis-vector-db:
     image: redis/redis-stack:7.2.0-v9
@@ -12,7 +9,7 @@ services:
       - "6379:6379"
       - "8001:8001"
   dataprep-redis-service:
-    image: opea/dataprep-redis:latest
+    image: ${REGISTRY:-opea}/dataprep-redis:${TAG:-latest}
     container_name: dataprep-redis-server
     depends_on:
       - redis-vector-db
@@ -41,7 +38,7 @@ services:
       https_proxy: ${https_proxy}
     command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate
   embedding:
-    image: opea/embedding-tei:latest
+    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
     container_name: embedding-tei-server
     depends_on:
       - tei-embedding-service
@@ -58,7 +55,7 @@ services:
       LANGCHAIN_PROJECT: "opea-embedding-service"
     restart: unless-stopped
   retriever:
-    image: opea/retriever-redis:latest
+    image: ${REGISTRY:-opea}/retriever-redis:${TAG:-latest}
     container_name: retriever-redis-server
     depends_on:
       - redis-vector-db
@@ -93,7 +90,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     command: --model-id ${RERANK_MODEL_ID} --auto-truncate
   reranking:
-    image: opea/reranking-tei:latest
+    image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
     container_name: reranking-tei-xeon-server
     depends_on:
       - tei-reranking-service
@@ -113,7 +110,7 @@ services:
       LANGCHAIN_PROJECT: "opea-reranking-service"
     restart: unless-stopped
   vllm_service:
-    image: opea/vllm:latest
+    image: ${REGISTRY:-opea}/vllm:${TAG:-latest}
     container_name: vllm-service
     ports:
       - "9009:80"
@@ -128,7 +125,7 @@ services:
       LLM_MODEL_ID: ${LLM_MODEL_ID}
     command: --model $LLM_MODEL_ID --host 0.0.0.0 --port 80
   llm:
-    image: opea/llm-vllm:latest
+    image: ${REGISTRY:-opea}/llm-vllm:${TAG:-latest}
     container_name: llm-vllm-server
     depends_on:
       - vllm_service
@@ -149,7 +146,7 @@ services:
       LANGCHAIN_PROJECT: "opea-llm-service"
     restart: unless-stopped
   chaqna-xeon-backend-server:
-    image: opea/chatqna:latest
+    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
     container_name: chatqna-xeon-backend-server
     depends_on:
       - redis-vector-db
@@ -174,7 +171,7 @@ services:
     ipc: host
     restart: always
   chaqna-xeon-ui-server:
-    image: opea/chatqna-ui:latest
+    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
     container_name: chatqna-xeon-ui-server
     depends_on:
       - chaqna-xeon-backend-server
@@ -3,47 +3,33 @@
 # SPDX-License-Identifier: Apache-2.0

 set -e
-echo "IMAGE_REPO=${IMAGE_REPO}"
+IMAGE_REPO=${IMAGE_REPO:-"opea"}
+IMAGE_TAG=${IMAGE_TAG:-"latest"}
+echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
+echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+export REGISTRY=${IMAGE_REPO}
+export TAG=${IMAGE_TAG}

 WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
 ip_address=$(hostname -I | awk '{print $1}')

 function build_docker_images() {
-    cd $WORKPATH
+    cd $WORKPATH/docker
     git clone https://github.com/opea-project/GenAIComps.git
-    cd GenAIComps
+    git clone https://github.com/huggingface/tei-gaudi

-    docker build -t opea/embedding-tei:latest -f comps/embeddings/langchain/docker/Dockerfile .
-    docker build -t opea/retriever-redis:latest -f comps/retrievers/langchain/redis/docker/Dockerfile .
-    docker build -t opea/reranking-tei:latest -f comps/reranks/tei/docker/Dockerfile .
-    docker build -t opea/llm-tgi:latest -f comps/llms/text-generation/tgi/Dockerfile .
-    docker build -t opea/dataprep-redis:latest -f comps/dataprep/redis/langchain/docker/Dockerfile .
-
-    # cd ..
-    # git clone https://github.com/huggingface/tei-gaudi
-    # cd tei-gaudi/
-    # docker build --no-cache -f Dockerfile-hpu -t opea/tei-gaudi:latest .
+    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
+    service_list="chatqna chatqna-ui dataprep-redis embedding-tei retriever-redis reranking-tei llm-tgi tei-gaudi"
+    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

     docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1
+    docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5

-    cd $WORKPATH/docker
-    docker build --no-cache -t opea/chatqna:latest -f Dockerfile .
-
-    cd $WORKPATH/docker/ui
-    docker build --no-cache -t opea/chatqna-ui:latest -f docker/Dockerfile .
-
     docker images
 }

 function start_services() {
-    # build tei-gaudi for each test instead of pull from local registry
-    cd $WORKPATH
-    git clone https://github.com/huggingface/tei-gaudi
-    cd tei-gaudi/
-    docker build --no-cache -q -f Dockerfile-hpu -t opea/tei-gaudi:latest .
-
     cd $WORKPATH/docker/gaudi
     export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
     export RERANK_MODEL_ID="BAAI/bge-reranker-base"
@@ -67,24 +53,9 @@ function start_services() {

     sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

-    if [[ "$IMAGE_REPO" != "" ]]; then
-        # Replace the container name with a test-specific name
-        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
-        if [ "${mode}" == "perf" ]; then
-            sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
-        else
-            sed -i "s#image: opea/chatqna:latest#image: opea/chatqna:${IMAGE_TAG}#g" compose.yaml
-            sed -i "s#image: opea/chatqna-ui:latest#image: opea/chatqna-ui:${IMAGE_TAG}#g" compose.yaml
-            sed -i "s#image: opea/chatqna-conversation-ui:latest#image: opea/chatqna-conversation-ui:${IMAGE_TAG}#g" compose.yaml
-            sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
-            sed -i "s#image: ${IMAGE_REPO}opea/tei-gaudi:latest#image: opea/tei-gaudi:latest#g" compose.yaml
-        fi
-        echo "cat compose.yaml"
-        cat compose.yaml
-    fi
-
     # Start Docker Containers
     docker compose up -d

     n=0
     until [[ "$n" -ge 400 ]]; do
         docker logs tgi-gaudi-server > ${LOG_PATH}/tgi_service_start.log
@@ -246,13 +217,16 @@ function validate_frontend() {
     cd $WORKPATH/docker/ui/svelte
     local conda_env_name="OPEA_e2e"
     export PATH=${HOME}/miniforge3/bin/:$PATH
-    # conda remove -n ${conda_env_name} --all -y
-    # conda create -n ${conda_env_name} python=3.12 -y
+    if conda info --envs | grep -q "$conda_env_name"; then
+        echo "$conda_env_name exist!"
+    else
+        conda create -n ${conda_env_name} python=3.12 -y
+    fi
     source activate ${conda_env_name}

     sed -i "s/localhost/$ip_address/g" playwright.config.ts

-    # conda install -c conda-forge nodejs -y
+    conda install -c conda-forge nodejs -y
     npm install && npm ci && npx playwright install --with-deps
     node -v && npm -v && pip list

@@ -275,7 +249,7 @@ function stop_docker() {
 function main() {

     stop_docker
-    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
     start_time=$(date +%s)
     start_services
     end_time=$(date +%s)
@@ -2,29 +2,28 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set -xe
echo "IMAGE_REPO=${IMAGE_REPO}"
set -e
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
    cd $WORKPATH
    git clone https://github.com/opea-project/GenAIComps.git
    cd GenAIComps

    docker build -t opea/embedding-tei:latest -f comps/embeddings/langchain/docker/Dockerfile .
    docker build -t opea/retriever-redis:latest -f comps/retrievers/langchain/redis/docker/Dockerfile .
    docker build -t opea/reranking-tei:latest -f comps/reranks/tei/docker/Dockerfile .
    docker build -t opea/llm-tgi:latest -f comps/llms/text-generation/tgi/Dockerfile .
    docker build -t opea/dataprep-redis:latest -f comps/dataprep/redis/langchain/docker/Dockerfile .

    cd $WORKPATH/docker
    docker build --no-cache -t opea/chatqna:latest -f Dockerfile .
    git clone https://github.com/opea-project/GenAIComps.git

    cd $WORKPATH/docker/ui
    docker build --no-cache -t opea/chatqna-ui:latest -f docker/Dockerfile .
    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="chatqna chatqna-ui chatqna-conversation-ui dataprep-redis embedding-tei retriever-redis reranking-tei llm-tgi"
    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1
    docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5

    docker images
}
@@ -54,21 +53,6 @@ function start_services() {

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

    if [[ "$IMAGE_REPO" != "" ]]; then
        # Replace the container name with a test-specific name
        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
        if [ "${mode}" == "perf" ]; then
            sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
        else
            sed -i "s#image: opea/chatqna:latest#image: opea/chatqna:${IMAGE_TAG}#g" compose.yaml
            sed -i "s#image: opea/chatqna-ui:latest#image: opea/chatqna-ui:${IMAGE_TAG}#g" compose.yaml
            sed -i "s#image: opea/chatqna-conversation-ui:latest#image: opea/chatqna-conversation-ui:${IMAGE_TAG}#g" compose.yaml
            sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
        fi
        echo "cat compose.yaml"
        cat compose.yaml
    fi

    # Start Docker Containers
    docker compose up -d
    n=0
@@ -233,14 +217,17 @@ function validate_frontend() {
    cd $WORKPATH/docker/ui/svelte
    local conda_env_name="OPEA_e2e"
    export PATH=${HOME}/miniforge3/bin/:$PATH
    # conda remove -n ${conda_env_name} --all -y
    # conda create -n ${conda_env_name} python=3.12 -y
    if conda info --envs | grep -q "$conda_env_name"; then
        echo "$conda_env_name exists!"
    else
        conda create -n ${conda_env_name} python=3.12 -y
    fi
    source activate ${conda_env_name}
    echo "[ TEST INFO ]: --------- conda env activated ---------"

    sed -i "s/localhost/$ip_address/g" playwright.config.ts

    # conda install -c conda-forge nodejs -y
    conda install -c conda-forge nodejs -y
    npm install && npm ci && npx playwright install --with-deps
    node -v && npm -v && pip list

@@ -263,7 +250,7 @@ function stop_docker() {
function main() {

    stop_docker
    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    start_time=$(date +%s)
    start_services
    end_time=$(date +%s)

@@ -3,28 +3,24 @@
# SPDX-License-Identifier: Apache-2.0

set -e
echo "IMAGE_REPO=${IMAGE_REPO}"
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
    cd $WORKPATH
    git clone https://github.com/opea-project/GenAIComps.git
    cd GenAIComps

    docker build -t opea/embedding-tei:latest -f comps/embeddings/langchain/docker/Dockerfile .
    docker build -t opea/retriever-qdrant:latest -f comps/retrievers/haystack/qdrant/docker/Dockerfile .
    docker build -t opea/reranking-tei:latest -f comps/reranks/tei/docker/Dockerfile .
    docker build -t opea/llm-tgi:latest -f comps/llms/text-generation/tgi/Dockerfile .
    docker build -t opea/dataprep-qdrant:latest -f comps/dataprep/qdrant/docker/Dockerfile .

    cd $WORKPATH/docker
    docker build --no-cache -t opea/chatqna:latest -f Dockerfile .
    git clone https://github.com/opea-project/GenAIComps.git

    cd $WORKPATH/docker/ui
    docker build --no-cache -t opea/chatqna-ui:latest -f docker/Dockerfile .
    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="chatqna chatqna-ui dataprep-qdrant embedding-tei retriever-qdrant reranking-tei llm-tgi"
    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker images
}
@@ -56,16 +52,6 @@ function start_services() {

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

    if [[ "$IMAGE_REPO" != "" ]]; then
        # Replace the container name with a test-specific name
        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
        sed -i "s#image: opea/chatqna:latest#image: opea/chatqna:${IMAGE_TAG}#g" compose_qdrant.yaml
        sed -i "s#image: opea/chatqna-ui:latest#image: opea/chatqna-ui:${IMAGE_TAG}#g" compose_qdrant.yaml
        sed -i "s#image: opea/chatqna-conversation-ui:latest#image: opea/chatqna-conversation-ui:${IMAGE_TAG}#g" compose_qdrant.yaml
        sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose_qdrant.yaml
        cat compose_qdrant.yaml
    fi

    # Start Docker Containers
    docker compose -f compose_qdrant.yaml up -d
    n=0
@@ -235,7 +221,7 @@ function stop_docker() {
function main() {

    stop_docker
    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    start_time=$(date +%s)
    start_services
    end_time=$(date +%s)

@@ -3,47 +3,31 @@
# SPDX-License-Identifier: Apache-2.0

set -e
echo "IMAGE_REPO=${IMAGE_REPO}"
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
    cd $WORKPATH
    cd $WORKPATH/docker
    git clone https://github.com/opea-project/GenAIComps.git
    cd GenAIComps
    git clone https://github.com/huggingface/tei-gaudi

    docker build -t opea/embedding-tei:latest -f comps/embeddings/langchain/docker/Dockerfile .
    docker build -t opea/retriever-redis:latest -f comps/retrievers/langchain/redis/docker/Dockerfile .
    docker build -t opea/reranking-tei:latest -f comps/reranks/tei/docker/Dockerfile .
    docker build -t opea/llm-vllm-hpu:latest -f comps/llms/text-generation/vllm/docker/Dockerfile.hpu .
    docker build -t opea/llm-vllm:latest -f comps/llms/text-generation/vllm/docker/Dockerfile.microservice .
    docker build -t opea/dataprep-redis:latest -f comps/dataprep/redis/langchain/docker/Dockerfile .

    # cd ..
    # git clone https://github.com/huggingface/tei-gaudi
    # cd tei-gaudi/
    # docker build --no-cache -f Dockerfile-hpu -t opea/tei-gaudi:latest .
    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="chatqna chatqna-ui dataprep-redis embedding-tei retriever-redis reranking-tei tei-gaudi llm-vllm-hpu llm-vllm"
    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.2

    cd $WORKPATH/docker
    docker build --no-cache -t opea/chatqna:latest -f Dockerfile .

    cd $WORKPATH/docker/ui
    docker build --no-cache -t opea/chatqna-ui:latest -f docker/Dockerfile .

    docker images
}

function start_services() {
    # build tei-gaudi for each test instead of pull from local registry
    cd $WORKPATH
    git clone https://github.com/huggingface/tei-gaudi
    cd tei-gaudi/
    docker build --no-cache -q -f Dockerfile-hpu -t opea/tei-gaudi:latest .

    cd $WORKPATH/docker/gaudi
    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
    export RERANK_MODEL_ID="BAAI/bge-reranker-base"
@@ -65,18 +49,6 @@ function start_services() {

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

    if [[ "$IMAGE_REPO" != "" ]]; then
        # Replace the container name with a test-specific name
        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
        sed -i "s#image: opea/chatqna:latest#image: opea/chatqna:${IMAGE_TAG}#g" compose_vllm.yaml
        sed -i "s#image: opea/chatqna-ui:latest#image: opea/chatqna-ui:${IMAGE_TAG}#g" compose_vllm.yaml
        sed -i "s#image: opea/chatqna-conversation-ui:latest#image: opea/chatqna-conversation-ui:${IMAGE_TAG}#g" compose_vllm.yaml
        sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose_vllm.yaml
        sed -i "s#image: ${IMAGE_REPO}opea/tei-gaudi:latest#image: opea/tei-gaudi:latest#g" compose_vllm.yaml
        echo "cat compose_vllm.yaml"
        cat compose_vllm.yaml
    fi

    # Start Docker Containers
    docker compose -f compose_vllm.yaml up -d
    n=0
@@ -198,13 +170,16 @@ function validate_frontend() {
    cd $WORKPATH/docker/ui/svelte
    local conda_env_name="OPEA_e2e"
    export PATH=${HOME}/miniforge3/bin/:$PATH
    # conda remove -n ${conda_env_name} --all -y
    # conda create -n ${conda_env_name} python=3.12 -y
    if conda info --envs | grep -q "$conda_env_name"; then
        echo "$conda_env_name exists!"
    else
        conda create -n ${conda_env_name} python=3.12 -y
    fi
    source activate ${conda_env_name}

    sed -i "s/localhost/$ip_address/g" playwright.config.ts

    # conda install -c conda-forge nodejs -y
    conda install -c conda-forge nodejs -y
    npm install && npm ci && npx playwright install --with-deps
    node -v && npm -v && pip list

@@ -227,7 +202,7 @@ function stop_docker() {
function main() {

    stop_docker
    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    start_time=$(date +%s)
    start_services
    end_time=$(date +%s)

@@ -3,43 +3,33 @@
# SPDX-License-Identifier: Apache-2.0

set -e
echo "IMAGE_REPO=${IMAGE_REPO}"
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
    cd $WORKPATH
    git clone https://github.com/opea-project/GenAIComps.git
    cd GenAIComps

    docker build -t opea/embedding-tei:latest -f comps/embeddings/langchain/docker/Dockerfile .
    docker build -t opea/retriever-redis:latest -f comps/retrievers/langchain/redis/docker/Dockerfile .
    docker build -t opea/reranking-tei:latest -f comps/reranks/tei/docker/Dockerfile .
    docker build -t opea/llm-vllm:latest -f comps/llms/text-generation/vllm/docker/Dockerfile.microservice .
    docker build -t opea/dataprep-redis:latest -f comps/dataprep/redis/langchain/docker/Dockerfile .

    cd $WORKPATH/docker
    docker build --no-cache -t opea/chatqna:latest -f Dockerfile .
    git clone https://github.com/opea-project/GenAIComps.git
    git clone https://github.com/vllm-project/vllm.git

    cd $WORKPATH/docker/ui
    docker build --no-cache -t opea/chatqna-ui:latest -f docker/Dockerfile .
    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="chatqna chatqna-ui dataprep-redis embedding-tei retriever-redis reranking-tei llm-vllm vllm"
    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    # cd $WORKPATH
    # git clone https://github.com/vllm-project/vllm.git
    # cd vllm
    # docker build --no-cache -t opea/vllm:latest -f Dockerfile.cpu .
    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1
    docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5

    docker images
}

function start_services() {
    cd $WORKPATH
    git clone https://github.com/vllm-project/vllm.git
    cd vllm
    docker build --no-cache -t opea/vllm:latest -f Dockerfile.cpu .

    cd $WORKPATH/docker/xeon

    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
@@ -62,29 +52,15 @@ function start_services() {

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

    if [[ "$IMAGE_REPO" != "" ]]; then
        # Replace the container name with a test-specific name
        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
        if [ "${mode}" == "perf" ]; then
            sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose_vllm.yaml
        else
            sed -i "s#image: opea/chatqna:latest#image: opea/chatqna:${IMAGE_TAG}#g" compose_vllm.yaml
            sed -i "s#image: opea/chatqna-ui:latest#image: opea/chatqna-ui:${IMAGE_TAG}#g" compose_vllm.yaml
            sed -i "s#image: opea/chatqna-conversation-ui:latest#image: opea/chatqna-conversation-ui:${IMAGE_TAG}#g" compose_vllm.yaml
            sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose_vllm.yaml
            sed -i "s#image: ${IMAGE_REPO}opea/vllm:latest#image: opea/vllm:latest#g" compose_vllm.yaml
        fi
    fi

    # Start Docker Containers
    docker compose -f compose_vllm.yaml up -d
    n=0
    until [[ "$n" -ge 10 ]]; do
    until [[ "$n" -ge 100 ]]; do
        docker logs vllm-service > ${LOG_PATH}/vllm_service_start.log
        if grep -q Connected ${LOG_PATH}/vllm_service_start.log; then
            break
        fi
        sleep 10s
        sleep 5s
        n=$((n+1))
    done
}
@@ -196,13 +172,17 @@ function validate_frontend() {
    cd $WORKPATH/docker/ui/svelte
    local conda_env_name="OPEA_e2e"
    export PATH=${HOME}/miniforge3/bin/:$PATH
    # conda remove -n ${conda_env_name} --all -y
    # conda create -n ${conda_env_name} python=3.12 -y
    if conda info --envs | grep -q "$conda_env_name"; then
        echo "$conda_env_name exists!"
    else
        conda create -n ${conda_env_name} python=3.12 -y
    fi

    source activate ${conda_env_name}

    sed -i "s/localhost/$ip_address/g" playwright.config.ts

    # conda install -c conda-forge nodejs -y
    conda install -c conda-forge nodejs -y
    npm install && npm ci && npx playwright install --with-deps
    node -v && npm -v && pip list

@@ -225,7 +205,7 @@ function stop_docker() {
function main() {

    stop_docker
    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    start_time=$(date +%s)
    start_services
    end_time=$(date +%s)

@@ -3,46 +3,31 @@
# SPDX-License-Identifier: Apache-2.0

set -e
echo "IMAGE_REPO=${IMAGE_REPO}"
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
    cd $WORKPATH
    cd $WORKPATH/docker
    git clone https://github.com/opea-project/GenAIComps.git
    cd GenAIComps
    git clone https://github.com/huggingface/tei-gaudi

    docker build -t opea/embedding-tei:latest -f comps/embeddings/langchain/docker/Dockerfile .
    docker build -t opea/retriever-redis:latest -f comps/retrievers/langchain/redis/docker/Dockerfile .
    docker build -t opea/reranking-tei:latest -f comps/reranks/tei/docker/Dockerfile .
    docker build -t opea/llm-vllm-ray-hpu:latest -f comps/llms/text-generation/vllm-ray/docker/Dockerfile.vllmray .
    docker build -t opea/llm-vllm-ray:latest -f comps/llms/text-generation/vllm-ray/docker/Dockerfile.microservice .
    docker build -t opea/dataprep-redis:latest -f comps/dataprep/redis/langchain/docker/Dockerfile .

    # cd ..
    # git clone https://github.com/huggingface/tei-gaudi
    # cd tei-gaudi/
    # docker build --no-cache -f Dockerfile-hpu -t opea/tei-gaudi:latest .
    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="chatqna chatqna-ui dataprep-redis embedding-tei retriever-redis reranking-tei tei-gaudi llm-vllm-ray-hpu llm-vllm-ray"
    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.2

    cd $WORKPATH/docker
    docker build --no-cache -t opea/chatqna:latest -f Dockerfile .

    cd $WORKPATH/docker/ui
    docker build --no-cache -t opea/chatqna-ui:latest -f docker/Dockerfile .

    docker images
}

function start_services() {
    # build tei-gaudi for each test instead of pull from local registry
    cd $WORKPATH
    git clone https://github.com/huggingface/tei-gaudi
    cd tei-gaudi/
    docker build --no-cache -q -f Dockerfile-hpu -t opea/tei-gaudi:latest .

    cd $WORKPATH/docker/gaudi
    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
@@ -65,18 +50,6 @@ function start_services() {

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

    if [[ "$IMAGE_REPO" != "" ]]; then
        # Replace the container name with a test-specific name
        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
        sed -i "s#image: opea/chatqna:latest#image: opea/chatqna:${IMAGE_TAG}#g" compose_vllm_ray.yaml
        sed -i "s#image: opea/chatqna-ui:latest#image: opea/chatqna-ui:${IMAGE_TAG}#g" compose_vllm_ray.yaml
        sed -i "s#image: opea/chatqna-conversation-ui:latest#image: opea/chatqna-conversation-ui:${IMAGE_TAG}#g" compose_vllm_ray.yaml
        sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose_vllm_ray.yaml
        sed -i "s#image: ${IMAGE_REPO}opea/tei-gaudi:latest#image: opea/tei-gaudi:latest#g" compose_vllm_ray.yaml
        echo "cat compose_vllm_ray.yaml"
        cat compose_vllm_ray.yaml
    fi

    # Start Docker Containers
    docker compose -f compose_vllm_ray.yaml up -d
    n=0
@@ -198,13 +171,16 @@ function validate_frontend() {
    cd $WORKPATH/docker/ui/svelte
    local conda_env_name="OPEA_e2e"
    export PATH=${HOME}/miniforge3/bin/:$PATH
    # conda remove -n ${conda_env_name} --all -y
    # conda create -n ${conda_env_name} python=3.12 -y
    if conda info --envs | grep -q "$conda_env_name"; then
        echo "$conda_env_name exists!"
    else
        conda create -n ${conda_env_name} python=3.12 -y
    fi
    source activate ${conda_env_name}

    sed -i "s/localhost/$ip_address/g" playwright.config.ts

    # conda install -c conda-forge nodejs -y
    conda install -c conda-forge nodejs -y
    npm install && npm ci && npx playwright install --with-deps
    node -v && npm -v && pip list

@@ -227,7 +203,7 @@ function stop_docker() {
function main() {

    stop_docker
    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    start_time=$(date +%s)
    start_services
    end_time=$(date +%s)

30
CodeGen/docker/docker_build_compose.yaml
Normal file
@@ -0,0 +1,30 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

services:
  codegen:
    build:
      args:
        http_proxy: ${http_proxy}
        https_proxy: ${https_proxy}
        no_proxy: ${no_proxy}
      dockerfile: ./Dockerfile
    image: ${REGISTRY:-opea}/codegen:${TAG:-latest}
  codegen-ui:
    build:
      context: ui
      dockerfile: ./docker/Dockerfile
    extends: codegen
    image: ${REGISTRY:-opea}/codegen-ui:${TAG:-latest}
  codegen-react-ui:
    build:
      context: ui
      dockerfile: ./docker/Dockerfile.react
    extends: codegen
    image: ${REGISTRY:-opea}/codegen-conversation-ui:${TAG:-latest}
  llm-tgi:
    build:
      context: GenAIComps
      dockerfile: comps/llms/text-generation/tgi/Dockerfile
    extends: codegen
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
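For reference, a minimal sketch of how a build compose file like this is driven by the test scripts elsewhere in this change; the registry and tag values here are illustrative placeholders:

```bash
# Sketch: build a subset of CodeGen images through the build compose file.
# REGISTRY and TAG fall back to "opea" and "latest" via the :- defaults above.
export REGISTRY=${IMAGE_REPO:-opea}   # illustrative registry/namespace
export TAG=${IMAGE_TAG:-latest}       # illustrative tag
service_list="codegen codegen-ui llm-tgi"
docker compose -f docker_build_compose.yaml build ${service_list} --no-cache
```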
@@ -148,16 +148,24 @@ To access the frontend, open the following URL in your browser: `http://{host_ip



## 🚀 Launch the React Based UI
## 🚀 Launch the React Based UI (Optional)

To access the frontend, open the following URL in your browser: `http://{host_ip}:5174`. By default, the UI runs on port 5174 internally. If you prefer to use a different host port to access the frontend, you can modify the port mapping in the `compose.yaml` file as shown below:
To access the React-based frontend, modify the UI service in the `compose.yaml` file. Replace the `codegen-gaudi-ui-server` service with the `codegen-gaudi-react-ui-server` service as per the config below:

```yaml
codegen-gaudi-react-ui-server:
  image: opea/codegen-react-ui:latest
  ...
  ports:
    - "80:5174"
codegen-gaudi-react-ui-server:
  image: ${REGISTRY:-opea}/codegen-react-ui:${TAG:-latest}
  container_name: codegen-gaudi-react-ui-server
  environment:
    - no_proxy=${no_proxy}
    - https_proxy=${https_proxy}
    - http_proxy=${http_proxy}
  depends_on:
    - codegen-gaudi-backend-server
  ports:
    - "5174:80"
  ipc: host
  restart: always
```



@@ -1,9 +1,6 @@

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

version: "3.8"

services:
  tgi-service:
    image: ghcr.io/huggingface/tgi-gaudi:2.0.1
@@ -25,7 +22,7 @@ services:
    ipc: host
    command: --model-id ${LLM_MODEL_ID} --max-input-length 1024 --max-total-tokens 2048
  llm:
    image: opea/llm-tgi:latest
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
    container_name: llm-tgi-gaudi-server
    depends_on:
      - tgi-service
@@ -40,7 +37,7 @@ services:
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
  codegen-gaudi-backend-server:
    image: opea/codegen:latest
    image: ${REGISTRY:-opea}/codegen:${TAG:-latest}
    container_name: codegen-gaudi-backend-server
    depends_on:
      - llm
@@ -55,7 +52,7 @@ services:
    ipc: host
    restart: always
  codegen-gaudi-ui-server:
    image: opea/codegen-ui:latest
    image: ${REGISTRY:-opea}/codegen-ui:${TAG:-latest}
    container_name: codegen-gaudi-ui-server
    depends_on:
      - codegen-gaudi-backend-server
@@ -69,16 +66,6 @@ services:
    ipc: host
    restart: always

  codegen-gaudi-react-ui-server:
    image: opea/codegen-react-ui:latest
    container_name: codegen-gaudi-react-ui-server
    depends_on:
      - codegen-gaudi-backend-server
    ports:
      - "5174:80"
    ipc: host
    restart: always

networks:
  default:
    driver: bridge

@@ -155,16 +155,24 @@ Here is an example of running CodeGen in the UI:



## 🚀 Launch the React Based UI
## 🚀 Launch the React Based UI (Optional)

To access the frontend, open the following URL in your browser: `http://{host_ip}:5174`. By default, the UI runs on port 5174 internally. If you prefer to use a different host port to access the frontend, you can modify the port mapping in the `compose.yaml` file as shown below:
To access the React-based frontend, modify the UI service in the `compose.yaml` file. Replace the `codegen-xeon-ui-server` service with the `codegen-xeon-react-ui-server` service as per the config below:

```yaml
codegen-xeon-react-ui-server:
  image: opea/codegen-react-ui:latest
  ...
  ports:
    - "80:5174"
codegen-xeon-react-ui-server:
  image: ${REGISTRY:-opea}/codegen-react-ui:${TAG:-latest}
  container_name: codegen-xeon-react-ui-server
  environment:
    - no_proxy=${no_proxy}
    - https_proxy=${https_proxy}
    - http_proxy=${http_proxy}
  depends_on:
    - codegen-xeon-backend-server
  ports:
    - "5174:80"
  ipc: host
  restart: always
```



@@ -1,9 +1,6 @@

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

version: "3.8"

services:
  tgi-service:
    image: ghcr.io/huggingface/text-generation-inference:1.4
@@ -20,7 +17,7 @@ services:
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    command: --model-id ${LLM_MODEL_ID}
  llm:
    image: opea/llm-tgi:latest
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
    container_name: llm-tgi-server
    depends_on:
      - tgi-service
@@ -35,7 +32,7 @@ services:
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
  codegen-xeon-backend-server:
    image: opea/codegen:latest
    image: ${REGISTRY:-opea}/codegen:${TAG:-latest}
    container_name: codegen-xeon-backend-server
    depends_on:
      - llm
@@ -50,7 +47,7 @@ services:
    ipc: host
    restart: always
  codegen-xeon-ui-server:
    image: opea/codegen-ui:latest
    image: ${REGISTRY:-opea}/codegen-ui:${TAG:-latest}
    container_name: codegen-xeon-ui-server
    depends_on:
      - codegen-xeon-backend-server
@@ -63,15 +60,6 @@ services:
      - BASIC_URL=${BACKEND_SERVICE_ENDPOINT}
    ipc: host
    restart: always
  codegen-xeon-react-ui-server:
    image: opea/codegen-react-ui:latest
    container_name: codegen-xeon-react-ui-server
    depends_on:
      - codegen-xeon-backend-server
    ports:
      - "5174:80"
    ipc: host
    restart: always

networks:
  default:

@@ -3,33 +3,31 @@
# SPDX-License-Identifier: Apache-2.0

set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
    cd $WORKPATH
    cd $WORKPATH/docker
    git clone https://github.com/opea-project/GenAIComps.git
    cd GenAIComps

    docker build -t opea/llm-tgi:latest -f comps/llms/text-generation/tgi/Dockerfile .
    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="codegen codegen-ui llm-tgi"
    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1

    cd $WORKPATH/docker
    docker build --no-cache -t opea/codegen:latest -f Dockerfile .

    cd $WORKPATH/docker/ui
    docker build --no-cache -t opea/codegen-ui:latest -f docker/Dockerfile .
    docker build --no-cache --build-arg BACKEND_SERVICE_ENDPOINT=http://${ip_address}:7778/v1/codegen -t opea/codegen-react-ui:latest -f docker/Dockerfile.react .

    docker images
}

function start_services() {
    cd $WORKPATH/docker/gaudi

    export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
    export TGI_LLM_ENDPOINT="http://${ip_address}:8028"
    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
@@ -39,17 +37,6 @@ function start_services() {

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

    if [[ "$IMAGE_REPO" != "" ]]; then
        # Replace the container name with a test-specific name
        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
        sed -i "s#image: opea/codegen:latest#image: opea/codegen:${IMAGE_TAG}#g" compose.yaml
        sed -i "s#image: opea/codegen-ui:latest#image: opea/codegen-ui:${IMAGE_TAG}#g" compose.yaml
        sed -i "s#image: opea/codegen-react-ui:latest#image: opea/codegen-react-ui:${IMAGE_TAG}#g" compose.yaml
        sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
        echo "cat compose.yaml"
        cat compose.yaml
    fi

    # Start Docker Containers
    docker compose up -d

@@ -126,13 +113,16 @@ function validate_frontend() {
    cd $WORKPATH/docker/ui/svelte
    local conda_env_name="OPEA_e2e"
    export PATH=${HOME}/miniforge3/bin/:$PATH
    # conda remove -n ${conda_env_name} --all -y
    # conda create -n ${conda_env_name} python=3.12 -y
    if conda info --envs | grep -q "$conda_env_name"; then
        echo "$conda_env_name exists!"
    else
        conda create -n ${conda_env_name} python=3.12 -y
    fi
    source activate ${conda_env_name}

    sed -i "s/localhost/$ip_address/g" playwright.config.ts

    # conda install -c conda-forge nodejs -y
    conda install -c conda-forge nodejs -y
    npm install && npm ci && npx playwright install --with-deps
    node -v && npm -v && pip list

@@ -156,7 +146,7 @@ function main() {

    stop_docker

    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    start_services

    validate_microservices

@@ -3,27 +3,26 @@
# SPDX-License-Identifier: Apache-2.0

set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
    cd $WORKPATH
    cd $WORKPATH/docker
    git clone https://github.com/opea-project/GenAIComps.git
    cd GenAIComps

    docker build -t opea/llm-tgi:latest -f comps/llms/text-generation/tgi/Dockerfile .
    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="codegen codegen-ui llm-tgi"
    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker pull ghcr.io/huggingface/text-generation-inference:1.4

    cd $WORKPATH/docker
    docker build --no-cache -t opea/codegen:latest -f Dockerfile .

    cd $WORKPATH/docker/ui
    docker build --no-cache -t opea/codegen-ui:latest -f docker/Dockerfile .
    docker build --no-cache --build-arg BACKEND_SERVICE_ENDPOINT=http://${ip_address}:7778/v1/codegen -t opea/codegen-react-ui:latest -f docker/Dockerfile.react .

    docker images
}

@@ -39,17 +38,6 @@ function start_services() {

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

    if [[ "$IMAGE_REPO" != "" ]]; then
        # Replace the container name with a test-specific name
        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
        sed -i "s#image: opea/codegen:latest#image: opea/codegen:${IMAGE_TAG}#g" compose.yaml
        sed -i "s#image: opea/codegen-ui:latest#image: opea/codegen-ui:${IMAGE_TAG}#g" compose.yaml
        sed -i "s#image: opea/codegen-react-ui:latest#image: opea/codegen-react-ui:${IMAGE_TAG}#g" compose.yaml
        sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
        echo "cat compose.yaml"
        cat compose.yaml
    fi

    # Start Docker Containers
    docker compose up -d

@@ -126,13 +114,16 @@ function validate_frontend() {
    cd $WORKPATH/docker/ui/svelte
    local conda_env_name="OPEA_e2e"
    export PATH=${HOME}/miniforge3/bin/:$PATH
    # conda remove -n ${conda_env_name} --all -y
    # conda create -n ${conda_env_name} python=3.12 -y
    if conda info --envs | grep -q "$conda_env_name"; then
        echo "$conda_env_name exists!"
    else
        conda create -n ${conda_env_name} python=3.12 -y
    fi
    source activate ${conda_env_name}

    sed -i "s/localhost/$ip_address/g" playwright.config.ts

    # conda install -c conda-forge nodejs -y
    conda install -c conda-forge nodejs -y
    npm install && npm ci && npx playwright install --with-deps
    node -v && npm -v && pip list

@@ -157,7 +148,7 @@ function main() {

    stop_docker

    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    start_services

    validate_microservices

24
CodeTrans/docker/docker_build_compose.yaml
Normal file
@@ -0,0 +1,24 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

services:
  codetrans:
    build:
      args:
        http_proxy: ${http_proxy}
        https_proxy: ${https_proxy}
        no_proxy: ${no_proxy}
      dockerfile: ./Dockerfile
    image: ${REGISTRY:-opea}/codetrans:${TAG:-latest}
  codetrans-ui:
    build:
      context: ui
      dockerfile: ./docker/Dockerfile
    extends: codetrans
    image: ${REGISTRY:-opea}/codetrans-ui:${TAG:-latest}
  llm-tgi:
    build:
      context: GenAIComps
      dockerfile: comps/llms/text-generation/tgi/Dockerfile
    extends: codetrans
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
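To double-check what the variables resolve to before building, the rendered configuration can be inspected; a sketch (`docker compose config` prints the file with the current environment applied):

```bash
# Sketch: render the CodeTrans build compose with REGISTRY/TAG applied
# and list the image name each service would be tagged with.
REGISTRY=opea TAG=latest \
  docker compose -f docker_build_compose.yaml config | grep 'image:'
```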
@@ -1,9 +1,6 @@

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

version: "3.8"

services:
  tgi-service:
    image: ghcr.io/huggingface/tgi-gaudi:2.0.1
@@ -25,7 +22,7 @@ services:
    ipc: host
    command: --model-id ${LLM_MODEL_ID} --max-input-length 1024 --max-total-tokens 2048
  llm:
    image: opea/llm-tgi:latest
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
    container_name: llm-tgi-gaudi-server
    ports:
      - "9000:9000"
@@ -38,7 +35,7 @@ services:
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
  codetrans-gaudi-backend-server:
    image: opea/codetrans:latest
    image: ${REGISTRY:-opea}/codetrans:${TAG:-latest}
    container_name: codetrans-gaudi-backend-server
    depends_on:
      - tgi-service
@@ -54,7 +51,7 @@ services:
    ipc: host
    restart: always
  codetrans-gaudi-ui-server:
    image: opea/codetrans-ui:latest
    image: ${REGISTRY:-opea}/codetrans-ui:${TAG:-latest}
    container_name: codetrans-gaudi-ui-server
    depends_on:
      - codetrans-gaudi-backend-server

@@ -1,9 +1,6 @@

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

version: "3.8"

services:
  tgi-service:
    image: ghcr.io/huggingface/text-generation-inference:1.4
@@ -20,7 +17,7 @@ services:
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    command: --model-id ${LLM_MODEL_ID}
  llm:
    image: opea/llm-tgi:latest
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
    container_name: llm-tgi-server
    ports:
      - "9000:9000"
@@ -33,7 +30,7 @@ services:
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
  codetrans-xeon-backend-server:
    image: opea/codetrans:latest
    image: ${REGISTRY:-opea}/codetrans:${TAG:-latest}
    container_name: codetrans-xeon-backend-server
    depends_on:
      - tgi-service
@@ -49,7 +46,7 @@ services:
    ipc: host
    restart: always
  codetrans-xeon-ui-server:
    image: opea/codetrans-ui:latest
    image: ${REGISTRY:-opea}/codetrans-ui:${TAG:-latest}
    container_name: codetrans-xeon-ui-server
    depends_on:
      - codetrans-xeon-backend-server

@@ -2,27 +2,27 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set -x
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
    cd $WORKPATH
    cd $WORKPATH/docker
    git clone https://github.com/opea-project/GenAIComps.git
    cd GenAIComps

    docker build -t opea/llm-tgi:latest -f comps/llms/text-generation/tgi/Dockerfile .
    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="codetrans codetrans-ui llm-tgi"
    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1

    cd $WORKPATH/docker
    docker build --no-cache -t opea/codetrans:latest -f Dockerfile .

    cd $WORKPATH/docker/ui
    docker build --no-cache -t opea/codetrans-ui:latest -f docker/Dockerfile .

    docker images
}

@@ -40,16 +40,6 @@ function start_services() {

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

    if [[ "$IMAGE_REPO" != "" ]]; then
        # Replace the container name with a test-specific name
        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
        sed -i "s#image: opea/codetrans:latest#image: opea/codetrans:${IMAGE_TAG}#g" compose.yaml
        sed -i "s#image: opea/codetrans-ui:latest#image: opea/codetrans-ui:${IMAGE_TAG}#g" compose.yaml
        sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
        echo "cat compose.yaml"
        cat compose.yaml
    fi

    # Start Docker Containers
    docker compose up -d

@@ -124,13 +114,16 @@ function validate_frontend() {
    cd $WORKPATH/docker/ui/svelte
    local conda_env_name="OPEA_e2e"
    export PATH=${HOME}/miniforge3/bin/:$PATH
    # conda remove -n ${conda_env_name} --all -y
    # conda create -n ${conda_env_name} python=3.12 -y
    if conda info --envs | grep -q "$conda_env_name"; then
        echo "$conda_env_name exists!"
    else
        conda create -n ${conda_env_name} python=3.12 -y
    fi
    source activate ${conda_env_name}

    sed -i "s/localhost/$ip_address/g" playwright.config.ts

    # conda install -c conda-forge nodejs -y
    conda install -c conda-forge nodejs -y
    npm install && npm ci && npx playwright install --with-deps
    node -v && npm -v && pip list

@@ -155,7 +148,7 @@ function main() {

    stop_docker

    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    start_services

    validate_microservices

@@ -3,24 +3,26 @@
# SPDX-License-Identifier: Apache-2.0

set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
    cd $WORKPATH
    git clone https://github.com/opea-project/GenAIComps.git
    cd GenAIComps

    docker build -t opea/llm-tgi:latest -f comps/llms/text-generation/tgi/Dockerfile .

    cd $WORKPATH/docker
    docker build --no-cache -t opea/codetrans:latest -f Dockerfile .
    git clone https://github.com/opea-project/GenAIComps.git

    cd $WORKPATH/docker/ui
    docker build --no-cache -t opea/codetrans-ui:latest -f docker/Dockerfile .
    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="codetrans codetrans-ui llm-tgi"
    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker pull ghcr.io/huggingface/text-generation-inference:1.4
    docker images
}

@@ -37,16 +39,6 @@ function start_services() {

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

    if [[ "$IMAGE_REPO" != "" ]]; then
        # Replace the container name with a test-specific name
        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
        sed -i "s#image: opea/codetrans:latest#image: opea/codetrans:${IMAGE_TAG}#g" compose.yaml
        sed -i "s#image: opea/codetrans-ui:latest#image: opea/codetrans-ui:${IMAGE_TAG}#g" compose.yaml
        sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
        echo "cat compose.yaml"
        cat compose.yaml
    fi

    # Start Docker Containers
    docker compose up -d

@@ -123,13 +115,16 @@ function validate_frontend() {
    cd $WORKPATH/docker/ui/svelte
    local conda_env_name="OPEA_e2e"
    export PATH=${HOME}/miniforge3/bin/:$PATH
    # conda remove -n ${conda_env_name} --all -y
    # conda create -n ${conda_env_name} python=3.12 -y
    if conda info --envs | grep -q "$conda_env_name"; then
        echo "$conda_env_name exists!"
    else
        conda create -n ${conda_env_name} python=3.12 -y
    fi
    source activate ${conda_env_name}

    sed -i "s/localhost/$ip_address/g" playwright.config.ts

    # conda install -c conda-forge nodejs -y
    conda install -c conda-forge nodejs -y
    npm install && npm ci && npx playwright install --with-deps
    node -v && npm -v && pip list

@@ -153,7 +148,7 @@ function main() {

    stop_docker

    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    start_services

    validate_microservices

30
DocSum/docker/docker_build_compose.yaml
Normal file
@@ -0,0 +1,30 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

services:
  docsum:
    build:
      args:
        http_proxy: ${http_proxy}
        https_proxy: ${https_proxy}
        no_proxy: ${no_proxy}
      dockerfile: ./Dockerfile
    image: ${REGISTRY:-opea}/docsum:${TAG:-latest}
  docsum-ui:
    build:
      context: ui
      dockerfile: ./docker/Dockerfile
    extends: docsum
    image: ${REGISTRY:-opea}/docsum-ui:${TAG:-latest}
  docsum-react-ui:
    build:
      context: ui
      dockerfile: ./docker/Dockerfile.react
    extends: docsum
    image: ${REGISTRY:-opea}/docsum-react-ui:${TAG:-latest}
  llm-docsum-tgi:
    build:
      context: GenAIComps
      dockerfile: comps/llms/summarization/tgi/Dockerfile
    extends: docsum
    image: ${REGISTRY:-opea}/llm-docsum-tgi:${TAG:-latest}
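Because every service here `extends` the base `docsum` entry, the proxy build args are inherited automatically. A sketch of building a single image from `DocSum/docker`, assuming GenAIComps has been cloned there as the build context (as the test scripts below do):

```bash
# Sketch: llm-docsum-tgi builds from the cloned GenAIComps checkout
# (its build context above) and inherits the http(s)_proxy build args.
git clone https://github.com/opea-project/GenAIComps.git
docker compose -f docker_build_compose.yaml build llm-docsum-tgi
```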
@@ -127,7 +127,24 @@ Here is an example for summarizing an article.



## 🚀 Launch the React UI
## 🚀 Launch the React UI (Optional)

To access the React-based frontend, modify the UI service in the `compose.yaml` file. Replace the `docsum-gaudi-ui-server` service with the `docsum-gaudi-react-ui-server` service as per the config below:

```yaml
docsum-gaudi-react-ui-server:
  image: ${REGISTRY:-opea}/docsum-react-ui:${TAG:-latest}
  container_name: docsum-gaudi-react-ui-server
  depends_on:
    - docsum-gaudi-backend-server
  ports:
    - "5174:80"
  environment:
    - no_proxy=${no_proxy}
    - https_proxy=${https_proxy}
    - http_proxy=${http_proxy}
    - DOC_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
```

Open this URL `http://{host_ip}:5174` in your browser to access the frontend.


@@ -1,9 +1,6 @@

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

version: "3.8"

services:
  tgi-service:
    image: ghcr.io/huggingface/tgi-gaudi:2.0.1
@@ -24,7 +21,7 @@ services:
    ipc: host
    command: --model-id ${LLM_MODEL_ID} --max-input-length 1024 --max-total-tokens 2048
  llm:
    image: opea/llm-docsum-tgi:latest
    image: ${REGISTRY:-opea}/llm-docsum-tgi:${TAG:-latest}
    container_name: llm-docsum-gaudi-server
    depends_on:
      - tgi-service
@@ -39,7 +36,7 @@ services:
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
  docsum-gaudi-backend-server:
    image: opea/docsum:latest
    image: ${REGISTRY:-opea}/docsum:${TAG:-latest}
    container_name: docsum-gaudi-backend-server
    depends_on:
      - tgi-service
@@ -55,7 +52,7 @@ services:
    ipc: host
    restart: always
  docsum-gaudi-ui-server:
    image: opea/docsum-ui:latest
    image: ${REGISTRY:-opea}/docsum-ui:${TAG:-latest}
    container_name: docsum-gaudi-ui-server
    depends_on:
      - docsum-gaudi-backend-server
@@ -68,15 +65,6 @@ services:
      - DOC_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
    ipc: host
    restart: always
  docsum-xeon-react-ui-server:
    image: opea/docsum-react-ui:latest
    container_name: docsum-gaudi-react-ui-server
    depends_on:
      - docsum-gaudi-backend-server
    ports:
      - "5174:80"
    environment:
      - DOC_BASE_URL=${BACKEND_SERVICE_ENDPOINT}

networks:
  default:

@@ -135,6 +135,24 @@ Open this URL `http://{host_ip}:5174` in your browser to access the React based



### React UI
### React UI (Optional)

To access the React-based frontend, modify the UI service in the `compose.yaml` file. Replace the `docsum-xeon-ui-server` service with the `docsum-xeon-react-ui-server` service as per the config below:

```yaml
docsum-xeon-react-ui-server:
  image: ${REGISTRY:-opea}/docsum-react-ui:${TAG:-latest}
  container_name: docsum-xeon-react-ui-server
  depends_on:
    - docsum-xeon-backend-server
  ports:
    - "5174:80"
  environment:
    - no_proxy=${no_proxy}
    - https_proxy=${https_proxy}
    - http_proxy=${http_proxy}
  ipc: host
  restart: always
```



@@ -1,9 +1,6 @@

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

version: "3.8"

services:
  tgi-service:
    image: ghcr.io/huggingface/text-generation-inference:1.4
@@ -21,7 +18,7 @@ services:
    shm_size: 1g
    command: --model-id ${LLM_MODEL_ID}
  llm:
    image: opea/llm-docsum-tgi:latest
    image: ${REGISTRY:-opea}/llm-docsum-tgi:${TAG:-latest}
    container_name: llm-docsum-server
    depends_on:
      - tgi-service
@@ -36,7 +33,7 @@ services:
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
  docsum-xeon-backend-server:
    image: opea/docsum:latest
    image: ${REGISTRY:-opea}/docsum:${TAG:-latest}
    container_name: docsum-xeon-backend-server
    depends_on:
      - tgi-service
@@ -52,7 +49,7 @@ services:
    ipc: host
    restart: always
  docsum-xeon-ui-server:
    image: opea/docsum-ui:latest
    image: ${REGISTRY:-opea}/docsum-ui:${TAG:-latest}
    container_name: docsum-xeon-ui-server
    depends_on:
      - docsum-xeon-backend-server
@@ -65,15 +62,6 @@ services:
      - DOC_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
    ipc: host
    restart: always
  docsum-xeon-react-ui-server:
    image: opea/docsum-react-ui:latest
    container_name: docsum-xeon-react-ui-server
    depends_on:
      - docsum-xeon-backend-server
    ports:
      - "5174:80"
    ipc: host
    restart: always

networks:
  default:

@@ -2,28 +2,27 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set -x
set -e
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
    cd $WORKPATH
    cd $WORKPATH/docker
    git clone https://github.com/opea-project/GenAIComps.git
    cd GenAIComps

    docker build --no-cache -t opea/llm-docsum-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/summarization/tgi/Dockerfile .
    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="docsum docsum-ui llm-docsum-tgi"
    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1

    cd $WORKPATH/docker
    docker build --no-cache -t opea/docsum:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .

    cd $WORKPATH/docker/ui
    docker build --no-cache -t opea/docsum-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile .
    docker build --no-cache -t opea/docsum-react-ui:latest --build-arg BACKEND_SERVICE_ENDPOINT=http://${ip_address}:8888/v1/docsum -f docker/Dockerfile.react .

    docker images
}

@@ -39,17 +38,6 @@ function start_services() {

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

    if [[ "$IMAGE_REPO" != "" ]]; then
        # Replace the container name with a test-specific name
        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
        sed -i "s#image: opea/docsum:latest#image: opea/docsum:${IMAGE_TAG}#g" compose.yaml
        sed -i "s#image: opea/docsum-ui:latest#image: opea/docsum-ui:${IMAGE_TAG}#g" compose.yaml
        sed -i "s#image: opea/docsum-react-ui:latest#image: opea/docsum-react-ui:${IMAGE_TAG}#g" compose.yaml
        sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
        echo "cat compose.yaml"
        cat compose.yaml
    fi

    # Start Docker Containers
    docker compose up -d

@@ -126,13 +114,16 @@ function validate_frontend() {
    cd $WORKPATH/docker/ui/svelte
    local conda_env_name="OPEA_e2e"
    export PATH=${HOME}/miniforge3/bin/:$PATH
    # conda remove -n ${conda_env_name} --all -y
    # conda create -n ${conda_env_name} python=3.12 -y
    if conda info --envs | grep -q "$conda_env_name"; then
        echo "$conda_env_name exists!"
    else
        conda create -n ${conda_env_name} python=3.12 -y
    fi
    source activate ${conda_env_name}

    sed -i "s/localhost/$ip_address/g" playwright.config.ts

    # conda install -c conda-forge nodejs -y
    conda install -c conda-forge nodejs -y
    npm install && npm ci && npx playwright install --with-deps
    node -v && npm -v && pip list

@@ -156,7 +147,7 @@ function main() {

    stop_docker

    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    start_services

    validate_microservices

@@ -2,26 +2,27 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-set -x
+set -xe
+IMAGE_REPO=${IMAGE_REPO:-"opea"}
+IMAGE_TAG=${IMAGE_TAG:-"latest"}
+echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
+echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+export REGISTRY=${IMAGE_REPO}
+export TAG=${IMAGE_TAG}

 WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
 ip_address=$(hostname -I | awk '{print $1}')

 function build_docker_images() {
-    cd $WORKPATH
-    git clone https://github.com/opea-project/GenAIComps.git
-    cd GenAIComps
-    docker build --no-cache -t opea/llm-docsum-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/summarization/tgi/Dockerfile .
-
     cd $WORKPATH/docker
-    docker build --no-cache -t opea/docsum:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+    git clone https://github.com/opea-project/GenAIComps.git

-    cd $WORKPATH/docker/ui
-    docker build --no-cache -t opea/docsum-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile .
-    docker build --no-cache -t opea/docsum-react-ui:latest --build-arg BACKEND_SERVICE_ENDPOINT=http://${ip_address}:8888/v1/docsum -f docker/Dockerfile.react .
+    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
+    service_list="docsum docsum-ui llm-docsum-tgi"
+    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

     docker pull ghcr.io/huggingface/text-generation-inference:1.4
     docker images
 }

@@ -37,17 +38,6 @@ function start_services() {

     sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

-    if [[ "$IMAGE_REPO" != "" ]]; then
-        # Replace the container name with a test-specific name
-        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
-        sed -i "s#image: opea/docsum:latest#image: opea/docsum:${IMAGE_TAG}#g" compose.yaml
-        sed -i "s#image: opea/docsum-ui:latest#image: opea/docsum-ui:${IMAGE_TAG}#g" compose.yaml
-        sed -i "s#image: opea/docsum-react-ui:latest#image: opea/docsum-react-ui:${IMAGE_TAG}#g" compose.yaml
-        sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
-        echo "cat compose.yaml"
-        cat compose.yaml
-    fi

     # Start Docker Containers
     docker compose up -d
@@ -123,13 +113,16 @@ function validate_frontend() {
     cd $WORKPATH/docker/ui/svelte
     local conda_env_name="OPEA_e2e"
     export PATH=${HOME}/miniforge3/bin/:$PATH
-    # conda remove -n ${conda_env_name} --all -y
-    # conda create -n ${conda_env_name} python=3.12 -y
+    if conda info --envs | grep -q "$conda_env_name"; then
+        echo "$conda_env_name exist!"
+    else
+        conda create -n ${conda_env_name} python=3.12 -y
+    fi
     source activate ${conda_env_name}

     sed -i "s/localhost/$ip_address/g" playwright.config.ts

-    # conda install -c conda-forge nodejs -y
+    conda install -c conda-forge nodejs -y
     npm install && npm ci && npx playwright install --with-deps
     node -v && npm -v && pip list
@@ -153,7 +146,7 @@ function main() {

     stop_docker

-    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
     start_services

     validate_microservices
30
FaqGen/docker/docker_build_compose.yaml
Normal file
@@ -0,0 +1,30 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+services:
+  faqgen:
+    build:
+      args:
+        http_proxy: ${http_proxy}
+        https_proxy: ${https_proxy}
+        no_proxy: ${no_proxy}
+      dockerfile: ./Dockerfile
+    image: ${REGISTRY:-opea}/faqgen:${TAG:-latest}
+  faqgen-ui:
+    build:
+      context: ui
+      dockerfile: ./docker/Dockerfile
+    extends: faqgen
+    image: ${REGISTRY:-opea}/faqgen-ui:${TAG:-latest}
+  faqgen-react-ui:
+    build:
+      context: ui
+      dockerfile: ./docker/Dockerfile.react
+    extends: faqgen
+    image: ${REGISTRY:-opea}/faqgen-react-ui:${TAG:-latest}
+  llm-faqgen-tgi:
+    build:
+      context: GenAIComps
+      dockerfile: comps/llms/faq-generation/tgi/Dockerfile
+    extends: faqgen
+    image: ${REGISTRY:-opea}/llm-faqgen-tgi:${TAG:-latest}
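Reviewer note: every image name in these build files funnels through ${REGISTRY:-opea} and ${TAG:-latest}, so an unset environment falls back to the public names while CI can point the same file at a private registry. Compose uses the same ":-" default interpolation as the shell; a quick demonstration (registry value is hypothetical):

# ${VAR:-default} expands to "default" when VAR is unset or empty.
unset REGISTRY TAG
echo "${REGISTRY:-opea}/faqgen:${TAG:-latest}"    # -> opea/faqgen:latest

# With CI-style exports (values here are hypothetical):
export REGISTRY="192.0.2.10:5000/opea" TAG="ci"
echo "${REGISTRY:-opea}/faqgen:${TAG:-latest}"    # -> 192.0.2.10:5000/opea/faqgen:ci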
@@ -1,8 +1,6 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-version: "3.8"
-
 services:
   tgi-service:
     image: ghcr.io/huggingface/tgi-gaudi:2.0.1
@@ -26,7 +24,7 @@ services:
     ipc: host
     command: --model-id ${LLM_MODEL_ID} --max-input-length 2048 --max-total-tokens 4096 --max-batch-total-tokens 65536 --max-batch-prefill-tokens 4096
   llm_faqgen:
-    image: opea/llm-faqgen-tgi:latest
+    image: ${REGISTRY:-opea}/llm-faqgen-tgi:${TAG:-latest}
     container_name: llm-faqgen-server
     depends_on:
       - tgi-service
@@ -41,7 +39,7 @@ services:
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   faqgen-gaudi-backend-server:
-    image: opea/faqgen:latest
+    image: ${REGISTRY:-opea}/faqgen:${TAG:-latest}
     container_name: faqgen-gaudi-backend-server
     depends_on:
       - tgi-service
@@ -57,7 +55,7 @@ services:
     ipc: host
     restart: always
   faqgen-gaudi-ui-server:
-    image: opea/faqgen-ui:latest
+    image: ${REGISTRY:-opea}/faqgen-ui:${TAG:-latest}
     container_name: faqgen-gaudi-ui-server
     depends_on:
       - faqgen-gaudi-backend-server

@@ -1,8 +1,6 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-version: "3.8"
-
 services:
   tgi-service:
     image: ghcr.io/huggingface/text-generation-inference:1.4
@@ -26,7 +24,7 @@ services:
     shm_size: 1g
     command: --model-id ${LLM_MODEL_ID}
   llm_faqgen:
-    image: opea/llm-faqgen-tgi:latest
+    image: ${REGISTRY:-opea}/llm-faqgen-tgi:${TAG:-latest}
     container_name: llm-faqgen-server
     depends_on:
       - tgi-service
@@ -35,7 +33,7 @@ services:
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   faqgen-xeon-backend-server:
-    image: opea/faqgen:latest
+    image: ${REGISTRY:-opea}/faqgen:${TAG:-latest}
     container_name: faqgen-xeon-backend-server
     depends_on:
       - tgi-service
@@ -51,7 +49,7 @@ services:
     ipc: host
     restart: always
   faqgen-xeon-ui-server:
-    image: opea/faqgen-ui:latest
+    image: ${REGISTRY:-opea}/faqgen-ui:${TAG:-latest}
     container_name: faqgen-xeon-ui-server
     depends_on:
       - faqgen-xeon-backend-server

@@ -3,26 +3,26 @@
 # SPDX-License-Identifier: Apache-2.0

 set -x
+IMAGE_REPO=${IMAGE_REPO:-"opea"}
+IMAGE_TAG=${IMAGE_TAG:-"latest"}
+echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
+echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+export REGISTRY=${IMAGE_REPO}
+export TAG=${IMAGE_TAG}

 WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
 ip_address=$(hostname -I | awk '{print $1}')

 function build_docker_images() {
-    cd $WORKPATH
+    cd $WORKPATH/docker
     git clone https://github.com/opea-project/GenAIComps.git
-    cd GenAIComps
-    docker build --no-cache -t opea/llm-faqgen-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/faq-generation/tgi/Dockerfile .

+    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
+    service_list="faqgen faqgen-ui llm-faqgen-tgi"
+    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

     docker pull ghcr.io/huggingface/tgi-gaudi:1.2.1

-    cd $WORKPATH/docker
-    docker build --no-cache -t opea/faqgen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
-
-    cd $WORKPATH/docker/ui
-    docker build --no-cache -t opea/faqgen-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile .

     docker images
 }

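Reviewer note: the consolidated build only works when every name in service_list matches a service defined in docker_build_compose.yaml. A quick cross-check, runnable from the example's docker/ directory (a sketch; the loop is not part of this commit):

# List the services the compose file actually defines...
docker compose -f docker_build_compose.yaml config --services

# ...and confirm each service_list entry is among them (names from the script above).
service_list="faqgen faqgen-ui llm-faqgen-tgi"
for s in ${service_list}; do
    docker compose -f docker_build_compose.yaml config --services | grep -qx "$s" \
        || echo "missing service: $s"
done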
@@ -38,14 +38,6 @@ function start_services() {

     sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

-    # if [[ "$IMAGE_REPO" != "" ]]; then
-    #     # Replace the container name with a test-specific name
-    #     echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
-    #     sed -i "s#image: opea/faqgen:latest#image: opea/faqgen:${IMAGE_TAG}#g" compose.yaml
-    #     sed -i "s#image: opea/faqgen-ui:latest#image: opea/faqgen-ui:${IMAGE_TAG}#g" compose.yaml
-    #     sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
-    # fi

     # Start Docker Containers
     docker compose up -d
@@ -122,13 +114,16 @@ function validate_frontend() {
     cd $WORKPATH/docker/ui/svelte
     local conda_env_name="OPEA_e2e"
     export PATH=${HOME}/miniforge3/bin/:$PATH
-    # conda remove -n ${conda_env_name} --all -y
-    # conda create -n ${conda_env_name} python=3.12 -y
+    if conda info --envs | grep -q "$conda_env_name"; then
+        echo "$conda_env_name exist!"
+    else
+        conda create -n ${conda_env_name} python=3.12 -y
+    fi
     source activate ${conda_env_name}

     sed -i "s/localhost/$ip_address/g" playwright.config.ts

-    # conda install -c conda-forge nodejs -y
+    conda install -c conda-forge nodejs -y
     npm install && npm ci && npx playwright install --with-deps
     node -v && npm -v && pip list
@@ -152,7 +147,7 @@ function main() {

     stop_docker

-    build_docker_images
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
     start_services

     validate_microservices
@@ -3,25 +3,26 @@
 # SPDX-License-Identifier: Apache-2.0

 set -x
+IMAGE_REPO=${IMAGE_REPO:-"opea"}
+IMAGE_TAG=${IMAGE_TAG:-"latest"}
+echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
+echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+export REGISTRY=${IMAGE_REPO}
+export TAG=${IMAGE_TAG}

 WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
 ip_address=$(hostname -I | awk '{print $1}')

 function build_docker_images() {
-    cd $WORKPATH
-    git clone https://github.com/opea-project/GenAIComps.git
-    cd GenAIComps
-    docker build --no-cache -t opea/llm-faqgen-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/faq-generation/tgi/Dockerfile .
-
     cd $WORKPATH/docker
-    docker build --no-cache -t opea/faqgen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+    git clone https://github.com/opea-project/GenAIComps.git

-    cd $WORKPATH/docker/ui
-    docker build --no-cache -t opea/faqgen-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile .
+    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
+    service_list="faqgen faqgen-ui llm-faqgen-tgi"
+    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

     docker pull ghcr.io/huggingface/text-generation-inference:1.4
     docker images
 }

@@ -37,14 +38,6 @@ function start_services() {

     sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

-    # if [[ "$IMAGE_REPO" != "" ]]; then
-    #     # Replace the container name with a test-specific name
-    #     echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
-    #     sed -i "s#image: opea/faqgen:latest#image: opea/faqgen:${IMAGE_TAG}#g" compose.yaml
-    #     sed -i "s#image: opea/faqgen-ui:latest#image: opea/faqgen-ui:${IMAGE_TAG}#g" compose.yaml
-    #     sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
-    # fi

     # Start Docker Containers
     docker compose up -d
@@ -121,13 +114,16 @@ function validate_frontend() {
     cd $WORKPATH/docker/ui/svelte
     local conda_env_name="OPEA_e2e"
     export PATH=${HOME}/miniforge3/bin/:$PATH
-    # conda remove -n ${conda_env_name} --all -y
-    # conda create -n ${conda_env_name} python=3.12 -y
+    if conda info --envs | grep -q "$conda_env_name"; then
+        echo "$conda_env_name exist!"
+    else
+        conda create -n ${conda_env_name} python=3.12 -y
+    fi
     source activate ${conda_env_name}

     sed -i "s/localhost/$ip_address/g" playwright.config.ts

-    # conda install -c conda-forge nodejs -y
+    conda install -c conda-forge nodejs -y
     npm install && npm ci && npx playwright install --with-deps
     node -v && npm -v && pip list
@@ -151,7 +147,7 @@ function main() {

     stop_docker

-    build_docker_images
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
     start_services

     validate_microservices
48
SearchQnA/docker/docker_build_compose.yaml
Normal file
@@ -0,0 +1,48 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+services:
+  searchqna:
+    build:
+      args:
+        http_proxy: ${http_proxy}
+        https_proxy: ${https_proxy}
+        no_proxy: ${no_proxy}
+      dockerfile: ./Dockerfile
+    image: ${REGISTRY:-opea}/searchqna:${TAG:-latest}
+  searchqna-ui:
+    build:
+      context: ui
+      dockerfile: ./docker/Dockerfile
+    extends: searchqna
+    image: ${REGISTRY:-opea}/searchqna-ui:${TAG:-latest}
+  embedding-tei:
+    build:
+      context: GenAIComps
+      dockerfile: comps/embeddings/langchain/docker/Dockerfile
+    extends: searchqna
+    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
+  web-retriever-chroma:
+    build:
+      context: GenAIComps
+      dockerfile: comps/web_retrievers/langchain/chroma/docker/Dockerfile
+    extends: searchqna
+    image: ${REGISTRY:-opea}/web-retriever-chroma:${TAG:-latest}
+  reranking-tei:
+    build:
+      context: GenAIComps
+      dockerfile: comps/reranks/tei/docker/Dockerfile
+    extends: searchqna
+    image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
+  llm-tgi:
+    build:
+      context: GenAIComps
+      dockerfile: comps/llms/text-generation/tgi/Dockerfile
+    extends: searchqna
+    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
+  tei-gaudi:
+    build:
+      context: tei-gaudi
+      dockerfile: Dockerfile-hpu
+    extends: searchqna
+    image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest}
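Reviewer note: unlike the comps services, tei-gaudi builds from a local tei-gaudi directory rather than GenAIComps, so the compose build assumes that repository has already been cloned next to the compose file (the gaudi test script clones it alongside GenAIComps). A minimal sketch of the precondition:

# The tei-gaudi build context must exist before compose can build that service
# (paths relative to SearchQnA/docker, per the compose file above).
cd SearchQnA/docker
[ -d tei-gaudi ] || git clone https://github.com/huggingface/tei-gaudi
docker compose -f docker_build_compose.yaml build tei-gaudi --no-cache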
@@ -1,12 +1,9 @@
-
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-version: "3.8"
-
 services:
   tei-embedding-service:
-    image: opea/tei-gaudi:latest
+    image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest}
     container_name: tei-embedding-gaudi-server
     ports:
       - "3001:80"
@@ -27,7 +24,7 @@ services:
       ENABLE_EXPERIMENTAL_FLAGS: true
     command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate
   embedding:
-    image: opea/embedding-tei:latest
+    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
     container_name: embedding-tei-server
     depends_on:
       - tei-embedding-service
@@ -41,7 +38,7 @@ services:
       TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
     restart: unless-stopped
   web-retriever:
-    image: opea/web-retriever-chroma:latest
+    image: ${REGISTRY:-opea}/web-retriever-chroma:${TAG:-latest}
     container_name: web-retriever-chroma-server
     ports:
       - "3003:7077"
@@ -68,7 +65,7 @@ services:
       https_proxy: ${https_proxy}
     command: --model-id ${RERANK_MODEL_ID} --auto-truncate
   reranking:
-    image: opea/reranking-tei:latest
+    image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
     container_name: reranking-tei-xeon-server
     depends_on:
       - tei-reranking-service
@@ -104,7 +101,7 @@ services:
     ipc: host
     command: --model-id ${LLM_MODEL_ID} --max-input-length 1024 --max-total-tokens 2048
   llm:
-    image: opea/llm-tgi:latest
+    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
     container_name: llm-tgi-gaudi-server
     depends_on:
       - tgi-service
@@ -121,7 +118,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     restart: unless-stopped
   searchqna-gaudi-backend-server:
-    image: opea/searchqna:latest
+    image: ${REGISTRY:-opea}/searchqna:${TAG:-latest}
     container_name: searchqna-gaudi-backend-server
     depends_on:
       - tei-embedding-service
@@ -149,7 +146,7 @@ services:
     ipc: host
     restart: always
   searchqna-gaudi-ui-server:
-    image: opea/searchqna-ui:latest
+    image: ${REGISTRY:-opea}/searchqna-ui:${TAG:-latest}
     container_name: searchqna-gaudi-ui-server
     depends_on:
       - searchqna-gaudi-backend-server

@@ -1,9 +1,6 @@
-
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-version: "3.8"
-
 services:
   tei-embedding-service:
     image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
@@ -19,7 +16,7 @@ services:
       https_proxy: ${https_proxy}
     command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate
   embedding:
-    image: opea/embedding-tei:latest
+    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
     container_name: embedding-tei-server
     depends_on:
       - tei-embedding-service
@@ -34,7 +31,7 @@ services:
       HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   web-retriever:
-    image: opea/web-retriever-chroma:latest
+    image: ${REGISTRY:-opea}/web-retriever-chroma:${TAG:-latest}
     container_name: web-retriever-chroma-server
     ports:
       - "3003:7077"
@@ -61,7 +58,7 @@ services:
       https_proxy: ${https_proxy}
     command: --model-id ${RERANK_MODEL_ID} --auto-truncate
   reranking:
-    image: opea/reranking-tei:latest
+    image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
     container_name: reranking-tei-xeon-server
     depends_on:
       - tei-reranking-service
@@ -90,7 +87,7 @@ services:
       HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     command: --model-id ${LLM_MODEL_ID}
   llm:
-    image: opea/llm-tgi:latest
+    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
     container_name: llm-tgi-server
     depends_on:
       - tgi-service
@@ -105,7 +102,7 @@ services:
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   searchqna-xeon-backend-server:
-    image: opea/searchqna:latest
+    image: ${REGISTRY:-opea}/searchqna:${TAG:-latest}
     container_name: searchqna-xeon-backend-server
     depends_on:
       - tei-embedding-service
@@ -133,7 +130,7 @@ services:
     ipc: host
     restart: always
   searchqna-gaudi-ui-server:
-    image: opea/searchqna-ui:latest
+    image: ${REGISTRY:-opea}/searchqna-ui:${TAG:-latest}
     container_name: searchqna-xeon-ui-server
     depends_on:
       - searchqna-xeon-backend-server

@@ -2,46 +2,33 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-# for test

 set -xe
+IMAGE_REPO=${IMAGE_REPO:-"opea"}
+IMAGE_TAG=${IMAGE_TAG:-"latest"}
+echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
+echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+export REGISTRY=${IMAGE_REPO}
+export TAG=${IMAGE_TAG}

 WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
 ip_address=$(hostname -I | awk '{print $1}')

 function build_docker_images() {
-    cd $WORKPATH
+    cd $WORKPATH/docker
     git clone https://github.com/opea-project/GenAIComps.git
-    cd GenAIComps
+    git clone https://github.com/huggingface/tei-gaudi

-    docker build --no-cache -t opea/embedding-tei:latest -f comps/embeddings/langchain/docker/Dockerfile .
-    docker build --no-cache -t opea/web-retriever-chroma:latest -f comps/web_retrievers/langchain/chroma/docker/Dockerfile .
-    docker build --no-cache -t opea/reranking-tei:latest -f comps/reranks/tei/docker/Dockerfile .
-    docker build --no-cache -t opea/llm-tgi:latest -f comps/llms/text-generation/tgi/Dockerfile .
-
-    # cd ..
-    # git clone https://github.com/huggingface/tei-gaudi
-    # cd tei-gaudi/
-    # docker build --no-cache -f Dockerfile-hpu -t opea/tei-gaudi:latest .
+    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
+    service_list="searchqna searchqna-ui embedding-tei web-retriever-chroma reranking-tei llm-tgi tei-gaudi"
+    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

     docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
     docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1
-    cd $WORKPATH/docker
-    docker build --no-cache -t opea/searchqna:latest -f Dockerfile .
-
-    cd $WORKPATH/docker/ui
-    docker build --no-cache -t opea/searchqna-ui:latest -f docker/Dockerfile .

     docker images
 }

 function start_services() {
+    # build tei-gaudi for each test instead of pull from local registry
+    cd $WORKPATH
+    git clone https://github.com/huggingface/tei-gaudi
+    cd tei-gaudi/
+    docker build --no-cache -f Dockerfile-hpu -t opea/tei-gaudi:latest .
+
     cd $WORKPATH/docker/gaudi
     export GOOGLE_CSE_ID=$GOOGLE_CSE_ID
@@ -71,17 +58,6 @@ function start_services() {

     sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

-    if [[ "$IMAGE_REPO" != "" ]]; then
-        # Replace the container name with a test-specific name
-        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
-        sed -i "s#image: opea/searchqna:latest#image: opea/searchqna:${IMAGE_TAG}#g" compose.yaml
-        sed -i "s#image: opea/searchqna-ui:latest#image: opea/searchqna-ui:${IMAGE_TAG}#g" compose.yaml
-        sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
-        sed -i "s#image: ${IMAGE_REPO}opea/tei-gaudi:latest#image: opea/tei-gaudi:latest#g" compose.yaml
-        echo "cat compose.yaml"
-        cat compose.yaml
-    fi

     # Start Docker Containers
     docker compose up -d
     n=0
@@ -118,13 +94,16 @@ function validate_frontend() {
     local conda_env_name="OPEA_e2e"

     export PATH=${HOME}/miniforge3/bin/:$PATH
-    # conda remove -n ${conda_env_name} --all -y
-    # conda create -n ${conda_env_name} python=3.12 -y
+    if conda info --envs | grep -q "$conda_env_name"; then
+        echo "$conda_env_name exist!"
+    else
+        conda create -n ${conda_env_name} python=3.12 -y
+    fi
     source activate ${conda_env_name}

     sed -i "s/localhost/$ip_address/g" playwright.config.ts

-    # conda install -c conda-forge nodejs -y
+    conda install -c conda-forge nodejs -y
     npm install && npm ci && npx playwright install --with-deps
     node -v && npm -v && pip list
@@ -147,7 +126,7 @@ function stop_docker() {
 function main() {

     stop_docker
-    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
     start_services

     validate_megaservice
@@ -3,28 +3,27 @@
 # SPDX-License-Identifier: Apache-2.0

 set -xe
+IMAGE_REPO=${IMAGE_REPO:-"opea"}
+IMAGE_TAG=${IMAGE_TAG:-"latest"}
+echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
+echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+export REGISTRY=${IMAGE_REPO}
+export TAG=${IMAGE_TAG}

 WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
 ip_address=$(hostname -I | awk '{print $1}')

 function build_docker_images() {
-    cd $WORKPATH
+    cd $WORKPATH/docker
     git clone https://github.com/opea-project/GenAIComps.git
-    cd GenAIComps

-    docker build -t opea/embedding-tei:latest -f comps/embeddings/langchain/docker/Dockerfile .
-    docker build -t opea/web-retriever-chroma:latest -f comps/web_retrievers/langchain/chroma/docker/Dockerfile .
-    docker build -t opea/reranking-tei:latest -f comps/reranks/tei/docker/Dockerfile .
-    docker build -t opea/llm-tgi:latest -f comps/llms/text-generation/tgi/Dockerfile .
+    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
+    service_list="searchqna searchqna-ui embedding-tei web-retriever-chroma reranking-tei llm-tgi"
+    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

     docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
     docker pull ghcr.io/huggingface/text-generation-inference:1.4
-    cd $WORKPATH/docker
-    docker build -t opea/searchqna:latest -f Dockerfile .
-
-    cd $WORKPATH/docker/ui
-    docker build --no-cache -t opea/searchqna-ui:latest -f docker/Dockerfile .

     docker images
 }

@@ -57,25 +56,15 @@ function start_services() {

     sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

-    if [[ "$IMAGE_REPO" != "" ]]; then
-        # Replace the container name with a test-specific name
-        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
-        sed -i "s#image: opea/searchqna:latest#image: opea/searchqna:${IMAGE_TAG}#g" compose.yaml
-        sed -i "s#image: opea/searchqna-ui:latest#image: opea/searchqna-ui:${IMAGE_TAG}#g" compose.yaml
-        sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
-        echo "cat compose.yaml"
-        cat compose.yaml
-    fi

     # Start Docker Containers
     docker compose up -d
     n=0
-    until [[ "$n" -ge 200 ]]; do
+    until [[ "$n" -ge 100 ]]; do
         docker logs tgi-service > $LOG_PATH/tgi_service_start.log
         if grep -q Connected $LOG_PATH/tgi_service_start.log; then
             break
         fi
-        sleep 1s
+        sleep 5s
         n=$((n+1))
     done
 }
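Reviewer note: the retry budget changes from 200 one-second polls (about 3 minutes) to 100 five-second polls (about 8 minutes), giving TGI more time to download model weights on a cold runner while polling less aggressively. A generic version of the same readiness loop (function name and parameters are assumptions, not from this commit):

# Wait for a container to log a readiness marker; all names here are illustrative.
wait_for_log() {
    local container="$1" marker="$2" max_tries="${3:-100}" n=0
    until [[ "$n" -ge "$max_tries" ]]; do
        docker logs "$container" 2>&1 | grep -q "$marker" && return 0
        sleep 5s
        n=$((n+1))
    done
    echo "timed out waiting for $container to log '$marker'" >&2
    return 1
}

wait_for_log tgi-service Connected   # usage mirroring the script above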
@@ -97,28 +86,31 @@ function validate_megaservice() {
 }

 function validate_frontend() {
     cd $WORKPATH/docker/ui/svelte
     local conda_env_name="OPEA_e2e"
     export PATH=${HOME}/miniforge3/bin/:$PATH
-    # conda remove -n ${conda_env_name} --all -y
-    # conda create -n ${conda_env_name} python=3.12 -y
+    if conda info --envs | grep -q "$conda_env_name"; then
+        echo "$conda_env_name exist!"
+    else
+        conda create -n ${conda_env_name} python=3.12 -y
+    fi
     source activate ${conda_env_name}

     sed -i "s/localhost/$ip_address/g" playwright.config.ts

-    # conda install -c conda-forge nodejs -y
+    conda install -c conda-forge nodejs -y
     npm install && npm ci && npx playwright install --with-deps
     node -v && npm -v && pip list

     exit_status=0
     npx playwright test || exit_status=$?

     if [ $exit_status -ne 0 ]; then
         echo "[TEST INFO]: ---------frontend test failed---------"
         exit $exit_status
     else
         echo "[TEST INFO]: ---------frontend test passed---------"
     fi
 }

 function stop_docker() {
@@ -129,7 +121,7 @@ function stop_docker() {
 function main() {

     stop_docker
-    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
     start_services

     validate_megaservice
18
Translation/docker/docker_build_compose.yaml
Normal file
@@ -0,0 +1,18 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+services:
+  translation:
+    build:
+      args:
+        http_proxy: ${http_proxy}
+        https_proxy: ${https_proxy}
+        no_proxy: ${no_proxy}
+      dockerfile: ./Dockerfile
+    image: ${REGISTRY:-opea}/translation:${TAG:-latest}
+  translation-ui:
+    build:
+      context: ui
+      dockerfile: ./docker/Dockerfile
+    extends: translation
+    image: ${REGISTRY:-opea}/translation-ui:${TAG:-latest}
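Reviewer note: since every example now shares this layout, a single compose invocation can rebuild any subset of images, and overriding TAG keeps experimental builds from clobbering latest. A sketch (the tag value is hypothetical):

cd Translation/docker
# Rebuild only the megaservice and its UI under a scratch tag.
TAG=dev docker compose -f docker_build_compose.yaml build translation translation-ui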
@@ -1,18 +1,5 @@
-# Copyright (c) 2024 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3.8"
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0

 services:
   tgi-service:
@@ -29,7 +16,7 @@ services:
       - "./data:/data"
     command: --model-id ${LLM_MODEL_ID} --max-input-length 1024 --max-total-tokens 2048
   llm:
-    image: opea/llm-tgi:latest
+    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
     container_name: llm-tgi-gaudi-server
     depends_on:
       - tgi-service
@@ -43,7 +30,7 @@ services:
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   translation-gaudi-backend-server:
-    image: opea/translation:latest
+    image: ${REGISTRY:-opea}/translation:${TAG:-latest}
     container_name: translation-gaudi-backend-server
     depends_on:
       - tgi-service
@@ -58,7 +45,7 @@ services:
     ipc: host
     restart: always
   translation-gaudi-ui-server:
-    image: opea/translation-ui:latest
+    image: ${REGISTRY:-opea}/translation-ui:${TAG:-latest}
     container_name: translation-gaudi-ui-server
     depends_on:
       - translation-gaudi-backend-server

@@ -1,18 +1,5 @@
-# Copyright (c) 2024 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3.8"
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0

 services:
   tgi-service:
@@ -30,7 +17,7 @@ services:
     shm_size: 1g
     command: --model-id ${LLM_MODEL_ID}
   llm:
-    image: opea/llm-tgi:latest
+    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
     container_name: llm-tgi-server
     depends_on:
       - tgi-service
@@ -44,7 +31,7 @@ services:
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   translation-xeon-backend-server:
-    image: opea/translation:latest
+    image: ${REGISTRY:-opea}/translation:${TAG:-latest}
     container_name: translation-xeon-backend-server
     depends_on:
       - tgi-service
@@ -59,7 +46,7 @@ services:
     ipc: host
     restart: always
   translation-xeon-ui-server:
-    image: opea/translation-ui:latest
+    image: ${REGISTRY:-opea}/translation-ui:${TAG:-latest}
     container_name: translation-xeon-ui-server
     depends_on:
       - translation-xeon-backend-server

@@ -3,25 +3,26 @@
 # SPDX-License-Identifier: Apache-2.0

 set -x
+IMAGE_REPO=${IMAGE_REPO:-"opea"}
+IMAGE_TAG=${IMAGE_TAG:-"latest"}
+echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
+echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+export REGISTRY=${IMAGE_REPO}
+export TAG=${IMAGE_TAG}

 WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
 ip_address=$(hostname -I | awk '{print $1}')

 function build_docker_images() {
-    cd $WORKPATH
-    git clone https://github.com/opea-project/GenAIComps.git
-    cd GenAIComps
-    docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
-
     cd $WORKPATH/docker
-    docker build --no-cache -t opea/translation:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+    git clone https://github.com/opea-project/GenAIComps.git

+    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
+    service_list="translation translation-ui llm-tgi"
+    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

     docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1

-    cd $WORKPATH/docker/ui
-    docker build --no-cache -t opea/translation-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile .

     docker images
 }

@@ -37,16 +38,6 @@ function start_services() {

     sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

-    if [[ "$IMAGE_REPO" != "" ]]; then
-        # Replace the container name with a test-specific name
-        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
-        sed -i "s#image: opea/translation:latest#image: opea/translation:${IMAGE_TAG}#g" compose.yaml
-        sed -i "s#image: opea/translation-ui:latest#image: opea/translation-ui:${IMAGE_TAG}#g" compose.yaml
-        sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
-        echo "cat compose.yaml"
-        cat compose.yaml
-    fi

     # Start Docker Containers
     docker compose up -d
@@ -126,13 +117,16 @@ function validate_frontend() {
     cd $WORKPATH/docker/ui/svelte
     local conda_env_name="OPEA_e2e"
     export PATH=${HOME}/miniforge3/bin/:$PATH
-    # conda remove -n ${conda_env_name} --all -y
-    # conda create -n ${conda_env_name} python=3.12 -y
+    if conda info --envs | grep -q "$conda_env_name"; then
+        echo "$conda_env_name exist!"
+    else
+        conda create -n ${conda_env_name} python=3.12 -y
+    fi
     source activate ${conda_env_name}

     sed -i "s/localhost/$ip_address/g" playwright.config.ts

-    # conda install -c conda-forge nodejs -y
+    conda install -c conda-forge nodejs -y
     npm install && npm ci && npx playwright install --with-deps
     node -v && npm -v && pip list
@@ -156,7 +150,7 @@ function main() {

     stop_docker

-    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
     start_services

     validate_microservices
@@ -3,24 +3,26 @@
 # SPDX-License-Identifier: Apache-2.0

 set -x
+IMAGE_REPO=${IMAGE_REPO:-"opea"}
+IMAGE_TAG=${IMAGE_TAG:-"latest"}
+echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
+echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+export REGISTRY=${IMAGE_REPO}
+export TAG=${IMAGE_TAG}

 WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
 ip_address=$(hostname -I | awk '{print $1}')

 function build_docker_images() {
-    cd $WORKPATH
-    git clone https://github.com/opea-project/GenAIComps.git
-    cd GenAIComps
-    docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
-
     cd $WORKPATH/docker
-    docker build --no-cache -t opea/translation:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+    git clone https://github.com/opea-project/GenAIComps.git

-    cd $WORKPATH/docker/ui
-    docker build --no-cache -t opea/translation-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile .
+    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
+    service_list="translation translation-ui llm-tgi"
+    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

     docker pull ghcr.io/huggingface/text-generation-inference:1.4
     docker images
 }

@@ -36,16 +38,6 @@ function start_services() {

     sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

-    if [[ "$IMAGE_REPO" != "" ]]; then
-        # Replace the container name with a test-specific name
-        echo "using image repository $IMAGE_REPO and image tag $IMAGE_TAG"
-        sed -i "s#image: opea/translation:latest#image: opea/translation:${IMAGE_TAG}#g" compose.yaml
-        sed -i "s#image: opea/translation-ui:latest#image: opea/translation-ui:${IMAGE_TAG}#g" compose.yaml
-        sed -i "s#image: opea/*#image: ${IMAGE_REPO}opea/#g" compose.yaml
-        echo "cat compose.yaml"
-        cat compose.yaml
-    fi

     # Start Docker Containers
     docker compose up -d
@@ -123,13 +115,16 @@ function validate_frontend() {
     cd $WORKPATH/docker/ui/svelte
     local conda_env_name="OPEA_e2e"
     export PATH=${HOME}/miniforge3/bin/:$PATH
-    # conda remove -n ${conda_env_name} --all -y
-    # conda create -n ${conda_env_name} python=3.12 -y
+    if conda info --envs | grep -q "$conda_env_name"; then
+        echo "$conda_env_name exist!"
+    else
+        conda create -n ${conda_env_name} python=3.12 -y
+    fi
     source activate ${conda_env_name}

     sed -i "s/localhost/$ip_address/g" playwright.config.ts

-    # conda install -c conda-forge nodejs -y
+    conda install -c conda-forge nodejs -y
     npm install && npm ci && npx playwright install --with-deps
     node -v && npm -v && pip list
@@ -153,7 +148,7 @@ function main() {

     stop_docker

-    if [[ "$IMAGE_REPO" == "" ]]; then build_docker_images; fi
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
     start_services

     validate_microservices
@@ -1,12 +1,9 @@
-
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-version: "3.8"
-
 services:
   llava-tgi-service:
-    image: opea/llava-tgi:latest
+    image: ${REGISTRY:-opea}/llava-tgi:${TAG:-latest}
     container_name: tgi-llava-gaudi-server
     ports:
       - "8399:80"
@@ -26,7 +23,7 @@ services:
     ipc: host
     command: --model-id ${LVM_MODEL_ID} --max-input-length 4096 --max-total-tokens 8192
   lvm-tgi:
-    image: opea/lvm-tgi:latest
+    image: ${REGISTRY:-opea}/lvm-tgi:${TAG:-latest}
     container_name: lvm-tgi-gaudi-server
     depends_on:
       - llava-tgi-service
@@ -42,7 +39,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     restart: unless-stopped
   visualqna-gaudi-backend-server:
-    image: opea/visualqna:latest
+    image: ${REGISTRY:-opea}/visualqna:${TAG:-latest}
     container_name: visualqna-gaudi-backend-server
     depends_on:
       - llava-tgi-service
@@ -58,7 +55,7 @@ services:
     ipc: host
     restart: always
   visualqna-gaudi-ui-server:
-    image: opea/visualqna-ui:latest
+    image: ${REGISTRY:-opea}/visualqna-ui:${TAG:-latest}
     container_name: visualqna-gaudi-ui-server
     depends_on:
       - visualqna-gaudi-backend-server

@@ -1,12 +1,9 @@
-
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-version: "3.8"
-
 services:
   llava-tgi-service:
-    image: opea/llava-tgi-xeon:latest
+    image: ${REGISTRY:-opea}/llava-tgi-xeon:${TAG:-latest}
     container_name: tgi-llava-xeon-server
     ports:
       - "9399:80"
@@ -21,7 +18,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     command: --model-id ${LVM_MODEL_ID} --max-input-length 4096 --max-total-tokens 8192 --cuda-graphs 0
   lvm-tgi:
-    image: opea/lvm-tgi:latest
+    image: ${REGISTRY:-opea}/lvm-tgi:${TAG:-latest}
     container_name: lvm-tgi-server
     depends_on:
       - llava-tgi-service
@@ -37,7 +34,7 @@ services:
      HF_HUB_ENABLE_HF_TRANSFER: 0
     restart: unless-stopped
   visualqna-xeon-backend-server:
-    image: opea/visualqna:latest
+    image: ${REGISTRY:-opea}/visualqna:${TAG:-latest}
     container_name: visualqna-xeon-backend-server
     depends_on:
       - llava-tgi-service
@@ -53,7 +50,7 @@ services:
     ipc: host
     restart: always
   visualqna-xeon-ui-server:
-    image: opea/visualqna-ui:latest
+    image: ${REGISTRY:-opea}/visualqna-ui:${TAG:-latest}
     container_name: visualqna-xeon-ui-server
     depends_on:
       - visualqna-xeon-backend-server
