Compare commits
55 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 77ba9139a1 |  |
|  | 1b307832d7 |  |
|  | 2e62ecc18a |  |
|  | 84a91bb990 |  |
|  | 7dd0506e08 |  |
|  | ca6a8f8e1d |  |
|  | 295b81823c |  |
|  | 960cf38d33 |  |
|  | afcb3a3523 |  |
|  | 3ffe19eba1 |  |
|  | 6f5a9932f7 |  |
|  | 83e6a23441 |  |
|  | 7f19e8b546 |  |
|  | 81ceb26c3f |  |
|  | 2f472315fd |  |
|  | 6a3e9dbc18 |  |
|  | 6b76a93eb7 |  |
|  | b4d8e1a19b |  |
|  | b994bc8731 |  |
|  | d9b62a5a62 |  |
|  | 409c72350e |  |
|  | e32a51451c |  |
|  | e948a7f81b |  |
|  | e80e567817 |  |
|  | 4fecd6a850 |  |
|  | 01eed84db1 |  |
|  | a0b94b5401 |  |
|  | a1a384e1fa |  |
|  | 654e2a0d72 |  |
|  | c3b641f8a8 |  |
|  | 2b51374416 |  |
|  | 961abb3c05 |  |
|  | 2fb070dbfd |  |
|  | c5f3095ea5 |  |
|  | 2a48601227 |  |
|  | 240587932b |  |
|  | f2a94377aa |  |
|  | 5ade6865c9 |  |
|  | 29de55da3c |  |
|  | 99eb6a6a7e |  |
|  | 5715e9757e |  |
|  | 4d36def840 |  |
|  | 02c7baae2b |  |
|  | 60b1696530 |  |
|  | b967f60536 |  |
|  | e3289477b0 |  |
|  | 44c5cb71fa |  |
|  | 4d08310fdb |  |
|  | 26d6ea4724 |  |
|  | 4250048b18 |  |
|  | 422b4bc56b |  |
|  | 0c7f23cdc9 |  |
|  | 669ed25e97 |  |
|  | 5c59dce71d |  |
|  | 8a5ef62d2a |  |
64  .github/workflows/AudioQnA.yml  vendored
@@ -1,64 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

name: AudioQnA-test

on:
  pull_request_target:
    branches: [main]
    types: [opened, reopened, ready_for_review, synchronize] # added `ready_for_review` since draft is skipped
    paths:
      - AudioQnA/**
      - "!**.md"
      - "!**/ui/**"
      - .github/workflows/AudioQnA.yml
  workflow_dispatch:

# If there is a new commit, the previous jobs will be canceled
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  AudioQnA:
    runs-on: aise-cluster
    strategy:
      matrix:
        job_name: ["langchain"]
      fail-fast: false
    steps:
      - name: Clean Up Working Directory
        run: sudo rm -rf ${{github.workspace}}/*

      - name: Checkout out Repo
        uses: actions/checkout@v4
        with:
          ref: "refs/pull/${{ github.event.number }}/merge"

      - name: Run Test ASR
        env:
          HUGGINGFACEHUB_API_TOKEN: ${{ secrets.HUGGINGFACEHUB_API_TOKEN }}
        run: |
          cd ${{ github.workspace }}/AudioQnA/tests
          bash test_asr.sh

      - name: Run Test TTS
        env:
          HUGGINGFACEHUB_API_TOKEN: ${{ secrets.HUGGINGFACEHUB_API_TOKEN }}
        run: |
          cd ${{ github.workspace }}/AudioQnA/tests
          bash test_tts.sh

      - name: Run Test LLM engine
        env:
          HUGGINGFACEHUB_API_TOKEN: ${{ secrets.HUGGINGFACEHUB_API_TOKEN }}
        run: |
          cd ${{ github.workspace }}/AudioQnA/tests
          bash test_${{ matrix.job_name }}_inference.sh

      - name: Publish pipeline artifact
        if: ${{ !cancelled() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.job_name }}
          path: ${{ github.workspace }}/AudioQnA/tests/*.log
54  .github/workflows/SearchQnA.yml  vendored
@@ -1,54 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

name: SearchQnA-test

on:
  pull_request_target:
    branches: [main]
    types: [opened, reopened, ready_for_review, synchronize] # added `ready_for_review` since draft is skipped
    paths:
      - SearchQnA/**
      - "!**.md"
      - "!**/ui/**"
      - .github/workflows/SearchQnA.yml
  workflow_dispatch:

# If there is a new commit, the previous jobs will be canceled
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  SearchQnA:
    runs-on: aise-cluster
    strategy:
      matrix:
        job_name: ["langchain"]
      fail-fast: false
    steps:
      - name: Clean Up Working Directory
        run: sudo rm -rf ${{github.workspace}}/*

      - name: Checkout out Repo
        uses: actions/checkout@v4
        with:
          ref: "refs/pull/${{ github.event.number }}/merge"
      - name: Run Test
        env:
          HUGGINGFACEHUB_API_TOKEN: ${{ secrets.HUGGINGFACEHUB_API_TOKEN }}
          GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
          GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
          AISE_GAUDI_00_IP: ${{ secrets.AISE_GAUDI_00_IP }}
          AISE_CLUSTER_01_2_IP: ${{ secrets.AISE_CLUSTER_01_2_IP }}
          AISE_CLUSTER_01_3_IP: ${{ secrets.AISE_CLUSTER_01_3_IP }}
        run: |
          cd ${{ github.workspace }}/SearchQnA/tests
          bash test_${{ matrix.job_name }}_inference.sh

      - name: Publish pipeline artifact
        if: ${{ !cancelled() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.job_name }}
          path: ${{ github.workspace }}/SearchQnA/tests/*.log
50  .github/workflows/Translation.yml  vendored
@@ -1,50 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

name: Translation-test

on:
  pull_request:
    branches: [main]
    types: [opened, reopened, ready_for_review, synchronize] # added `ready_for_review` since draft is skipped
    paths:
      - Translation/**
      - "!**.md"
      - "!**/ui/**"
      - .github/workflows/Translation.yml
  workflow_dispatch:

# If there is a new commit, the previous jobs will be canceled
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  Translation:
    runs-on: aise-cluster
    strategy:
      matrix:
        job_name: ["langchain"]
      fail-fast: false
    steps:
      - name: Clean Up Working Directory
        run: sudo rm -rf ${{github.workspace}}/*

      - name: Checkout out Repo
        uses: actions/checkout@v4
        with:
          ref: "refs/pull/${{ github.event.number }}/merge"

      - name: Run Test
        env:
          HUGGINGFACEHUB_API_TOKEN: ${{ secrets.HUGGINGFACEHUB_API_TOKEN }}
        run: |
          cd ${{ github.workspace }}/Translation/tests
          bash test_${{ matrix.job_name }}_inference.sh

      - name: Publish pipeline artifact
        if: ${{ !cancelled() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.job_name }}
          path: ${{ github.workspace }}/Translation/tests/*.log
50  .github/workflows/bum_list_check.yml  vendored  Normal file
@@ -0,0 +1,50 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

name: Check Requirements

on: [pull_request]

jobs:
  check-requirements:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout PR branch
        uses: actions/checkout@v4

      - name: Save PR requirements
        run: |
          find . -name "requirements.txt" -exec cat {} \; | \
          grep -v '^\s*#' | \
          grep -v '^\s*$' | \
          grep -v '^\s*-' | \
          sed 's/^\s*//' | \
          awk -F'[>=<]' '{print $1}' | \
          sort -u > pr-requirements.txt
          cat pr-requirements.txt

      - name: Checkout main branch
        uses: actions/checkout@v4
        with:
          ref: main
          path: main-branch

      - name: Save main branch requirements
        run: |
          find ./main-branch -name "requirements.txt" -exec cat {} \; | \
          grep -v '^\s*#' | \
          grep -v '^\s*$' | \
          grep -v '^\s*-' | \
          sed 's/^\s*//' | \
          awk -F'[>=<]' '{print $1}' | \
          sort -u > main-requirements.txt
          cat main-requirements.txt

      - name: Compare requirements
        run: |
          comm -23 pr-requirements.txt main-requirements.txt > added-packages.txt
          if [ -s added-packages.txt ]; then
            echo "New packages found in PR:" && cat added-packages.txt
          else
            echo "No new packages found😊."
          fi
@@ -13,7 +13,7 @@ on:
      - "**/ui/**"
      - "!**.md"
      - "!**.txt"
      - .github/workflows/E2E_test_with_compose.yml
      - .github/workflows/docker-compose-e2e.yml
  workflow_dispatch:

# If there is a new commit, the previous jobs will be canceled
@@ -23,40 +23,22 @@ concurrency:

jobs:
  job1:
    name: Get-test-matrix
    runs-on: ubuntu-latest
    outputs:
      run_matrix: ${{ steps.get-test-matrix.outputs.run_matrix }}
    steps:
      - name: Checkout out Repo
        uses: actions/checkout@v4
        with:
          ref: "refs/pull/${{ github.event.number }}/merge"
          fetch-depth: 0
    uses: ./.github/workflows/reuse-get-test-matrix.yml
    with:
      diff_excluded_files: '.github|README.md|*.txt|deprecate|kubernetes|manifest|gmc|assets'

      - name: Get test matrix
        id: get-test-matrix
        run: |
          set -xe
          merged_commit=$(git log -1 --format='%H')
          changed_files=$(git diff --name-only ${{ github.event.pull_request.base.sha }} ${merged_commit} | \
          grep -vE '.github|README.md|*.txt|deprecate|kubernetes|manifest')
          examples=$(printf '%s\n' "${changed_files[@]}" | grep '/' | cut -d'/' -f1 | sort -u)
          run_matrix="{\"include\":["
          for example in ${examples}; do
            run_hardware=""
            if [ $(printf '%s\n' "${changed_files[@]}" | grep ${example} | grep -c gaudi) != 0 ]; then run_hardware="gaudi"; fi
            if [ $(printf '%s\n' "${changed_files[@]}" | grep ${example} | grep -c xeon) != 0 ]; then run_hardware="xeon ${run_hardware}"; fi
            if [ "$run_hardware" = "" ]; then run_hardware="xeon"; fi
            for hw in ${run_hardware}; do
              run_matrix="${run_matrix}{\"example\":\"${example}\",\"hardware\":\"${hw}\"},"
            done
          done
          run_matrix=$run_matrix"]}"
          echo "run_matrix=${run_matrix}" >> $GITHUB_OUTPUT
  mega-image-build:
    needs: job1
    strategy:
      matrix: ${{ fromJSON(needs.job1.outputs.run_matrix) }}
    uses: ./.github/workflows/reuse-image-build.yml
    with:
      image_tag: ${{ github.event.pull_request.head.sha }}
      mega_service: "${{ matrix.example }}"
      runner_label: "docker-build-${{ matrix.hardware }}"

  Example-test:
    needs: job1
    needs: [job1, mega-image-build]
    strategy:
      matrix: ${{ fromJSON(needs.job1.outputs.run_matrix) }}
    runs-on: ${{ matrix.hardware }}
@@ -77,10 +59,17 @@ jobs:
      - name: Run test
        env:
          HUGGINGFACEHUB_API_TOKEN: ${{ secrets.HUGGINGFACEHUB_API_TOKEN }}
          GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
          GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
          example: ${{ matrix.example }}
          hardware: ${{ matrix.hardware }}
          IMAGE_TAG: ${{ needs.mega-image-build.outputs.image_tag }}
          IMAGE_REPO_GAUDI: ${{ vars.IMAGE_REPO_GAUDI }}
          IMAGE_REPO_XEON: ${{ vars.IMAGE_REPO_XEON }}
        run: |
          cd ${{ github.workspace }}/$example/tests
          if [ "$hardware" == "gaudi" ]; then IMAGE_REPO=$IMAGE_REPO_GAUDI; else IMAGE_REPO=$IMAGE_REPO_XEON; fi
          export IMAGE_REPO=${IMAGE_REPO}
          example_l=$(echo $example | tr '[:upper:]' '[:lower:]')
          if [ -f test_${example_l}_on_${hardware}.sh ]; then timeout 30m bash test_${example_l}_on_${hardware}.sh; else echo "Test script not found, skip test!"; fi
104  .github/workflows/gmc-e2e.yaml  vendored  Normal file
@@ -0,0 +1,104 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

name: E2E test with GMC

on:
  pull_request:
    branches: [main]
    types: [opened, reopened, ready_for_review, synchronize] # added `ready_for_review` since draft is skipped
    paths:
      - "**/kubernetes/**"
      - "**/tests/test_gmc**"
      - "!**.md"
      - "!**.txt"
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  job1:
    uses: ./.github/workflows/reuse-get-test-matrix.yml
    with:
      diff_excluded_files: '.github|deprecated|docker|assets|*.md|*.txt'
      xeon_server_label: 'xeon'
      gaudi_server_label: 'gaudi'

  gmc-test:
    needs: [job1]
    strategy:
      matrix: ${{ fromJSON(needs.job1.outputs.run_matrix) }}
    runs-on: "k8s-${{ matrix.hardware }}"
    continue-on-error: true
    steps:
      - name: E2e test gmc
        run: |
          echo "Matrix - gmc: ${{ matrix.example }}"

      - name: Clean Up Working Directory
        run: sudo rm -rf ${{github.workspace}}/*

      - name: Checkout out Repo
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set variables
        run: |
          if [ ${{ matrix.hardware }} == "gaudi" ]; then IMAGE_REPO=${{ vars.IMAGE_REPO_GAUDI }}; else IMAGE_REPO=${{ vars.IMAGE_REPO_XEON }}; fi
          echo "IMAGE_REPO=$OPEA_IMAGE_REPO" >> $GITHUB_ENV
          lower_example=$(echo "${{ matrix.example }}" | tr '[:upper:]' '[:lower:]')
          echo "SYSTEM_NAMESPACE=opea-system-$(date +%Y%m%d%H%M%S)" >> $GITHUB_ENV
          echo "APP_NAMESPACE=$lower_example-$(date +%Y%m%d%H%M%S)" >> $GITHUB_ENV
          echo "ROLLOUT_TIMEOUT_SECONDS=1800s" >> $GITHUB_ENV
          echo "KUBECTL_TIMEOUT_SECONDS=60s" >> $GITHUB_ENV
          echo "continue_test=true" >> $GITHUB_ENV
          echo "should_cleanup=false" >> $GITHUB_ENV
          echo "skip_validate=true" >> $GITHUB_ENV
          echo "APP_NAMESPACE=$APP_NAMESPACE"

      - name: Kubectl install
        id: install
        run: |
          if [[ ! -f ${{ github.workspace }}/${{ matrix.example }}/tests/test_gmc_on_${{ matrix.hardware }}.sh ]]; then
            echo "No test script found, exist test!"
            exit 0
          else
            ${{ github.workspace }}/${{ matrix.example }}/tests/test_gmc_on_${{ matrix.hardware }}.sh init_${{ matrix.example }}
            echo "should_cleanup=true" >> $GITHUB_ENV
            ${{ github.workspace }}/${{ matrix.example }}/tests/test_gmc_on_${{ matrix.hardware }}.sh install_${{ matrix.example }}
            echo "Testing ${{ matrix.example }}, waiting for pod ready..."
            if kubectl rollout status deployment --namespace "$APP_NAMESPACE" --timeout "$ROLLOUT_TIMEOUT_SECONDS"; then
              echo "Testing gmc ${{ matrix.example }}, waiting for pod ready done!"
              echo "skip_validate=false" >> $GITHUB_ENV
            else
              echo "Timeout waiting for pods in namespace $APP_NAMESPACE to be ready!"
              exit 1
            fi
            sleep 60
          fi

      - name: Validate e2e test
        if: always()
        run: |
          if $skip_validate; then
            echo "Skip validate"
          else
            ${{ github.workspace }}/${{ matrix.example }}/tests/test_gmc_on_${{ matrix.hardware }}.sh validate_${{ matrix.example }}
          fi

      - name: Kubectl uninstall
        if: always()
        run: |
          if $should_cleanup; then
            if ! kubectl delete ns $SYSTEM_NAMESPACE --timeout=$KUBECTL_TIMEOUT_SECONDS; then
              kubectl delete pods --namespace $SYSTEM_NAMESPACE --force --grace-period=0 --all
              kubectl delete ns $SYSTEM_NAMESPACE --force --grace-period=0 --timeout=$KUBECTL_TIMEOUT_SECONDS
            fi
            if ! kubectl delete ns $APP_NAMESPACE --timeout=$KUBECTL_TIMEOUT_SECONDS; then
              kubectl delete pods --namespace $APP_NAMESPACE --force --grace-period=0 --all
              kubectl delete ns $APP_NAMESPACE --force --grace-period=0 --timeout=$KUBECTL_TIMEOUT_SECONDS
            fi
          fi
30  .github/workflows/image-build-on-push.yml  vendored  Normal file
@@ -0,0 +1,30 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Test
name: Build latest images on push event

on:
  push:
    branches: [ 'main' ]
    paths:
      - "**/docker/*.py"
      - "**/docker/Dockerfile"
      - "**/docker/ui/**"
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}-on-push
  cancel-in-progress: true

jobs:
  job1:
    uses: ./.github/workflows/reuse-get-test-matrix.yml

  mega-image-build:
    needs: job1
    strategy:
      matrix: ${{ fromJSON(needs.job1.outputs.run_matrix) }}
    uses: ./.github/workflows/reuse-image-build.yml
    with:
      image-tag: latest
      mega-service: "${{ matrix.example }}"
127  .github/workflows/manifest-e2e.yaml  vendored
@@ -1,127 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

name: E2E test with manifests

on:
  pull_request:
    branches: [main]
    types: [opened, reopened, ready_for_review, synchronize] # added `ready_for_review` since draft is skipped
    paths:
      - "**/kubernetes/manifests/**"
      - "**/tests/**"
      - "!**.md"
      - "!**.txt"
      - .github/workflows/manifest-e2e.yml
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  job1:
    name: Get-test-matrix
    runs-on: ubuntu-latest
    outputs:
      run_matrix: ${{ steps.get-test-matrix.outputs.run_matrix }}
    steps:
      - name: Checkout out Repo
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Get test matrix
        id: get-test-matrix
        run: |
          set -xe
          changed_files="$(git diff --name-only ${{ github.event.pull_request.base.sha }} ${{ github.event.pull_request.head.sha }} | \
          grep "/kubernetes/manifests/" | \
          grep -vE '.github|deprecated|docker')" || true
          examples=$(printf '%s\n' "${changed_files[@]}" | grep '/' | cut -d'/' -f1 | sort -u)
          run_matrix="{\"include\":["
          for example in ${examples}; do
            run_hardware=""
            if [ $(printf '%s\n' "${changed_files[@]}" | grep ${example} | grep -c gaudi) != 0 ]; then run_hardware="gaudi"; fi
            if [ $(printf '%s\n' "${changed_files[@]}" | grep ${example} | grep -c xeon) != 0 ]; then run_hardware="xeon ${run_hardware}"; fi
            if [[ -z "$run_hardware" ]]; then run_hardware="xeon"; fi
            for hw in ${run_hardware}; do
              if [ $hw = "gaudi" ]; then
                continue # skip gaudi for K8s test temporarily
              else
                #lower_example=$(echo "${example}" | tr '[:upper:]' '[:lower:]')
                run_matrix="${run_matrix}{\"example\":\"${example}\",\"hardware\":\"inspur-icx-1\"},"
              fi
            done
          done
          run_matrix=$run_matrix"]}"
          echo "run_matrix=${run_matrix}" >> $GITHUB_OUTPUT

  manifest-test:
    needs: job1
    if: always() && ${{ needs.job1.outputs.run_matrix.include.length }} > 0
    strategy:
      matrix: ${{ fromJSON(needs.job1.outputs.run_matrix) }}
    runs-on: ${{ matrix.hardware }}
    continue-on-error: true
    steps:
      - name: E2e test manifest
        run: |
          echo "Matrix - manifest: ${{ matrix.example }}"

      - name: Clean Up Working Directory
        run: sudo rm -rf ${{github.workspace}}/*

      - name: Checkout out Repo
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set variables
        run: |
          lower_example=$(echo "${{ matrix.example }}" | tr '[:upper:]' '[:lower:]')
          echo "NAMESPACE=$lower_example-$(date +%Y%m%d%H%M%S)" >> $GITHUB_ENV
          echo "ROLLOUT_TIMEOUT_SECONDS=1800s" >> $GITHUB_ENV
          echo "KUBECTL_TIMEOUT_SECONDS=60s" >> $GITHUB_ENV
          echo "should_cleanup=false" >> $GITHUB_ENV
          echo "skip_validate=false" >> $GITHUB_ENV
          echo "NAMESPACE=$NAMESPACE"

      - name: Initialize manifest testing
        run: |
          ${{ github.workspace }}/${{ matrix.example }}/tests/test_manifest_on_xeon.sh init_${{ matrix.example }}

      - name: Kubectl install
        id: install
        run: |
          echo "should_cleanup=true" >> $GITHUB_ENV
          kubectl create ns $NAMESPACE
          ${{ github.workspace }}/${{ matrix.example }}/tests/test_manifest_on_xeon.sh install_${{ matrix.example }} $NAMESPACE
          echo "Testing ${{ matrix.example }}, waiting for pod ready..."
          if kubectl rollout status deployment --namespace "$NAMESPACE" --timeout "$ROLLOUT_TIMEOUT_SECONDS"; then
            echo "Testing manifests ${{ matrix.example }}, waiting for pod ready done!"
          else
            echo "Timeout waiting for pods in namespace $NAMESPACE to be ready!"
            echo "skip_validate=true" >> $GITHUB_ENV
            exit 1
          fi
          sleep 60

      - name: Validate e2e test
        if: always()
        run: |
          if $skip_validate; then
            echo "Skip validate"
          else
            ${{ github.workspace }}/${{ matrix.example }}/tests/test_manifest_on_xeon.sh validate_${{ matrix.example }} $NAMESPACE
          fi

      - name: Kubectl uninstall
        if: always()
        run: |
          if $should_cleanup; then
            if ! kubectl delete ns $NAMESPACE --timeout=$KUBECTL_TIMEOUT_SECONDS; then
              kubectl delete pods --namespace $NAMESPACE --force --grace-period=0 --all
              kubectl delete ns $NAMESPACE --force --grace-period=0 --timeout=$KUBECTL_TIMEOUT_SECONDS
            fi
          fi
111  .github/workflows/manifest-e2e.yml  vendored  Normal file
@@ -0,0 +1,111 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

name: E2E test with manifests

on:
  pull_request:
    branches: [main]
    types: [opened, reopened, ready_for_review, synchronize] # added `ready_for_review` since draft is skipped
    paths:
      - "**/kubernetes/manifests/**"
      - "**/tests/test_manifest**"
      - "!**.md"
      - "!**.txt"
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  job1:
    uses: ./.github/workflows/reuse-get-test-matrix.yml
    with:
      diff_excluded_files: '.github|deprecated|docker|assets|*.md|*.txt'
      xeon_server_label: 'xeon'
      gaudi_server_label: 'gaudi'

  mega-image-build:
    needs: job1
    strategy:
      matrix: ${{ fromJSON(needs.job1.outputs.run_matrix) }}
    uses: ./.github/workflows/reuse-image-build.yml
    with:
      image_tag: ${{ github.event.pull_request.head.sha }}
      mega_service: "${{ matrix.example }}"
      runner_label: "docker-build-${{ matrix.hardware }}"

  manifest-test:
    needs: [job1, mega-image-build]
    strategy:
      matrix: ${{ fromJSON(needs.job1.outputs.run_matrix) }}
    runs-on: "k8s-${{ matrix.hardware }}"
    continue-on-error: true
    steps:
      - name: E2e test manifest
        run: |
          echo "Matrix - manifest: ${{ matrix.example }}"

      - name: Clean Up Working Directory
        run: sudo rm -rf ${{github.workspace}}/*

      - name: Checkout out Repo
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set variables
        run: |
          if [ ${{ matrix.hardware }} == "gaudi" ]; then IMAGE_REPO=${{ vars.IMAGE_REPO_GAUDI }}; else IMAGE_REPO=${{ vars.IMAGE_REPO_XEON }}; fi
          echo "IMAGE_REPO=$OPEA_IMAGE_REPO" >> $GITHUB_ENV
          echo "IMAGE_TAG=${{needs.mega-image-build.outputs.image_tag}}" >> $GITHUB_ENV
          lower_example=$(echo "${{ matrix.example }}" | tr '[:upper:]' '[:lower:]')
          echo "NAMESPACE=$lower_example-$(date +%Y%m%d%H%M%S)" >> $GITHUB_ENV
          echo "ROLLOUT_TIMEOUT_SECONDS=1800s" >> $GITHUB_ENV
          echo "KUBECTL_TIMEOUT_SECONDS=60s" >> $GITHUB_ENV
          echo "continue_test=true" >> $GITHUB_ENV
          echo "should_cleanup=false" >> $GITHUB_ENV
          echo "skip_validate=true" >> $GITHUB_ENV
          echo "NAMESPACE=$NAMESPACE"

      - name: Kubectl install
        id: install
        run: |
          if [[ ! -f ${{ github.workspace }}/${{ matrix.example }}/tests/test_manifest_on_${{ matrix.hardware }}.sh ]]; then
            echo "No test script found, exist test!"
            exit 0
          else
            ${{ github.workspace }}/${{ matrix.example }}/tests/test_manifest_on_${{ matrix.hardware }}.sh init_${{ matrix.example }}
            echo "should_cleanup=true" >> $GITHUB_ENV
            kubectl create ns $NAMESPACE
            ${{ github.workspace }}/${{ matrix.example }}/tests/test_manifest_on_${{ matrix.hardware }}.sh install_${{ matrix.example }} $NAMESPACE
            echo "Testing ${{ matrix.example }}, waiting for pod ready..."
            if kubectl rollout status deployment --namespace "$NAMESPACE" --timeout "$ROLLOUT_TIMEOUT_SECONDS"; then
              echo "Testing manifests ${{ matrix.example }}, waiting for pod ready done!"
              echo "skip_validate=false" >> $GITHUB_ENV
            else
              echo "Timeout waiting for pods in namespace $NAMESPACE to be ready!"
              exit 1
            fi
            sleep 60
          fi

      - name: Validate e2e test
        if: always()
        run: |
          if $skip_validate; then
            echo "Skip validate"
          else
            ${{ github.workspace }}/${{ matrix.example }}/tests/test_manifest_on_${{ matrix.hardware }}.sh validate_${{ matrix.example }} $NAMESPACE
          fi

      - name: Kubectl uninstall
        if: always()
        run: |
          if $should_cleanup; then
            if ! kubectl delete ns $NAMESPACE --timeout=$KUBECTL_TIMEOUT_SECONDS; then
              kubectl delete pods --namespace $NAMESPACE --force --grace-period=0 --all
              kubectl delete ns $NAMESPACE --force --grace-period=0 --timeout=$KUBECTL_TIMEOUT_SECONDS
            fi
          fi
@@ -9,7 +9,7 @@ on:
    types: [opened, reopened, ready_for_review, synchronize] # added `ready_for_review` since draft is skipped
    paths:
      - "**/kubernetes/manifests/**"
      - .github/workflows/manifest-validate.yaml
      - .github/workflows/manifest-validate.yml
  workflow_dispatch:

# If there is a new commit, the previous jobs will be canceled
44  .github/workflows/path_detection.yml  vendored  Normal file
@@ -0,0 +1,44 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

name: Check for missing Dockerfile paths in repo comps

on:
  pull_request:
    branches: [main]
    types: [opened, reopened, ready_for_review, synchronize]

jobs:
  check-dockerfile-paths:
    runs-on: ubuntu-latest

    steps:
      - name: Clean Up Working Directory
        run: sudo rm -rf ${{github.workspace}}/*

      - name: Checkout repo GenAIExamples
        uses: actions/checkout@v4

      - name: Clone repo GenAIComps
        run: |
          cd ..
          git clone https://github.com/opea-project/GenAIComps.git

      - name: Check for missing Dockerfile paths in GenAIComps
        run: |
          cd ${{github.workspace}}
          miss="FALSE"
          while IFS=: read -r file line content; do
            dockerfile_path=$(echo "$content" | awk -F '-f ' '{print $2}' | awk '{print $1}')
            if [[ ! -f "../GenAIComps/${dockerfile_path}" ]]; then
              miss="TRUE"
              echo "Missing Dockerfile: GenAIComps/${dockerfile_path} (Referenced in GenAIExamples/${file}:${line})"
            fi
          done < <(grep -Ern 'docker build .* -f comps/.+/Dockerfile' --include='*.md' .)

          if [[ "$miss" == "TRUE" ]]; then
            exit 1
          fi

        shell: bash
77  .github/workflows/reuse-get-test-matrix.yml  vendored  Normal file
@@ -0,0 +1,77 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# Support push and pull_request events
name: Get Test Matrix
permissions: read-all
on:
  workflow_call:
    inputs:
      diff_excluded_files:
        required: false
        type: string
        default: '.github|README.md|*.txt'
      xeon_server_label:
        required: false
        type: string
        default: 'xeon'
      gaudi_server_label:
        required: false
        type: string
        default: 'gaudi'
    outputs:
      run_matrix:
        description: "The matrix string"
        value: ${{ jobs.job1.outputs.run_matrix }}

jobs:
  job1:
    name: Get-test-matrix
    runs-on: ubuntu-latest
    outputs:
      run_matrix: ${{ steps.get-test-matrix.outputs.run_matrix }}
    steps:
      - name: Get checkout ref
        run: |
          if [ "${{ github.event_name }}" == "pull_request" ] || [ "${{ github.event_name }}" == "pull_request_target" ]; then
            echo "CHECKOUT_REF=refs/pull/${{ github.event.number }}/merge" >> $GITHUB_ENV
          else
            echo "CHECKOUT_REF=${{ github.ref }}" >> $GITHUB_ENV
          fi
          echo "checkout ref ${{ env.CHECKOUT_REF }}"

      - name: Checkout out Repo
        uses: actions/checkout@v4
        with:
          ref: ${{ env.CHECKOUT_REF }}
          fetch-depth: 0

      - name: Get test matrix
        id: get-test-matrix
        run: |
          set -xe
          if [ "${{ github.event_name }}" == "pull_request" ] || [ "${{ github.event_name }}" == "pull_request_target" ]; then
            base_commit=${{ github.event.pull_request.base.sha }}
          else
            base_commit=$(git rev-parse HEAD~1) # push event
          fi
          merged_commit=$(git log -1 --format='%H')
          changed_files="$(git diff --name-only ${base_commit} ${merged_commit} | \
          grep -vE '${{ inputs.diff_excluded_files }}')" || true
          examples=$(printf '%s\n' "${changed_files[@]}" | grep '/' | cut -d'/' -f1 | sort -u)
          run_matrix="{\"include\":["
          for example in ${examples}; do
            run_hardware=""
            if [ $(printf '%s\n' "${changed_files[@]}" | grep ${example} | grep -c gaudi) != 0 ]; then run_hardware="gaudi"; fi
            if [ $(printf '%s\n' "${changed_files[@]}" | grep ${example} | grep -c xeon) != 0 ]; then run_hardware="xeon ${run_hardware}"; fi
            if [ "$run_hardware" == "" ]; then run_hardware="gaudi"; fi
            for hw in ${run_hardware}; do
              if [ "$hw" == "gaudi" ] && [ "${{ inputs.gaudi_server_label }}" != "" ]; then
                run_matrix="${run_matrix}{\"example\":\"${example}\",\"hardware\":\"${{ inputs.gaudi_server_label }}\"},"
              elif [ "${{ inputs.xeon_server_label }}" != "" ]; then
                run_matrix="${run_matrix}{\"example\":\"${example}\",\"hardware\":\"${{ inputs.xeon_server_label }}\"},"
              fi
            done
          done
          run_matrix=$run_matrix"]}"
          echo "run_matrix=${run_matrix}" >> $GITHUB_OUTPUT
64  .github/workflows/reuse-image-build.yml  vendored  Normal file
@@ -0,0 +1,64 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

name: Image Build
permissions: read-all
on:
  workflow_call:
    inputs:
      image_repo:
        required: false
        type: string
      image_tag:
        required: true
        type: string
      mega_service:
        required: true
        type: string
      runner_label:
        required: false
        type: string
        default: 'docker-build-xeon'
    outputs:
      image_repo:
        description: "The image reposity used for the image build"
        value: ${{ jobs.mega-image-build.outputs.image_repo }}
      image_tag:
        description: "The image tag used for the image build"
        value: ${{ jobs.mega-image-build.outputs.image_tag }}

jobs:
  mega-image-build:
    runs-on: ${{ inputs.runner_label }}
    outputs:
      image_repo: ${{ steps.build-megaservice-image.outputs.image_repo }}
      image_tag: ${{ steps.build-megaservice-image.outputs.image_tag }}
    steps:
      - name: Get checkout ref
        run: |
          if [ "${{ github.event_name }}" == "pull_request" ] || [ "${{ github.event_name }}" == "pull_request_target" ]; then
            echo "CHECKOUT_REF=refs/pull/${{ github.event.number }}/merge" >> $GITHUB_ENV
          else
            echo "CHECKOUT_REF=${{ github.ref }}" >> $GITHUB_ENV
          fi
          echo "checkout ref ${{ env.CHECKOUT_REF }}"

      - name: Checkout out Repo
        uses: actions/checkout@v4
        with:
          ref: ${{ env.CHECKOUT_REF }}
          fetch-depth: 0

      - name: Building MegaService Docker Image
        id: build-megaservice-image
        env:
          IMAGE_REPO: ${{ inputs.image_repo }}
          IMAGE_TAG: ${{ inputs.image_tag }}
          MEGA_SERVICE: ${{ inputs.mega_service }}
        run: |
          .github/workflows/scripts/build_push.sh ${{ env.MEGA_SERVICE}}
          if [ -z "${{ env.IMAGE_REPO }}" ]; then
            IMAGE_REPO=$OPEA_IMAGE_REPO
          fi
          echo "IMAGE_TAG=${IMAGE_TAG}"
          echo "image_tag=$IMAGE_TAG" >> $GITHUB_OUTPUT
66  .github/workflows/scripts/build_push.sh  vendored  Executable file
@@ -0,0 +1,66 @@
#!/bin/bash
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set -xe

IMAGE_REPO=${IMAGE_REPO:-$OPEA_IMAGE_REPO}
IMAGE_TAG=${IMAGE_TAG:-latest}

function getImagenameFromMega() {
    echo $(echo "$1" | tr '[:upper:]' '[:lower:]')
}

function checkExist() {
    IMAGE_NAME=$1
    if [ $(curl -X GET http://localhost:5000/v2/opea/${IMAGE_NAME}/tags/list | grep -c ${IMAGE_TAG}) -ne 0 ]; then
        echo "true"
    else
        echo "false"
    fi
}

function docker_build() {
    # check if if IMAGE_TAG is not "latest" and the image exists in the registry
    if [ "$IMAGE_TAG" != "latest" ] && [ "$(checkExist $1)" == "true" ]; then
        echo "Image ${IMAGE_REPO}opea/$1:$IMAGE_TAG already exists in the registry"
        return
    fi
    # docker_build <service_name> <dockerfile>
    if [ -z "$2" ]; then
        DOCKERFILE_PATH=Dockerfile
    else
        DOCKERFILE_PATH=$2
    fi
    echo "Building ${IMAGE_REPO}opea/$1:$IMAGE_TAG using Dockerfile $DOCKERFILE_PATH"
    # if https_proxy and http_proxy are set, pass them to docker build
    if [ -z "$https_proxy" ]; then
        docker build --no-cache -t ${IMAGE_REPO}opea/$1:$IMAGE_TAG -f $DOCKERFILE_PATH .
    else
        docker build --no-cache -t ${IMAGE_REPO}opea/$1:$IMAGE_TAG --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f $DOCKERFILE_PATH .
    fi
    docker push ${IMAGE_REPO}opea/$1:$IMAGE_TAG
    docker rmi ${IMAGE_REPO}opea/$1:$IMAGE_TAG
}

# $1 is like "apple orange pear"
for MEGA_SVC in $1; do
    case $MEGA_SVC in
        "ChatQnA"|"CodeGen"|"CodeTrans"|"DocSum"|"Translation")
            cd $MEGA_SVC/docker
            IMAGE_NAME="$(getImagenameFromMega $MEGA_SVC)"
            docker_build ${IMAGE_NAME}
            cd ui
            docker_build ${IMAGE_NAME}-ui docker/Dockerfile
            if [ "$MEGA_SVC" == "ChatQnA" ];then
                docker_build ${IMAGE_NAME}-conversation-ui docker/Dockerfile.react
            fi
            ;;
        "AudioQnA"|"SearchQnA"|"VisualQnA")
            echo "Not supported yet"
            ;;
        *)
            echo "Unknown function: $MEGA_SVC"
            ;;
    esac
done
BIN  AudioQnA/assets/img/audio_ui.png  Normal file  (After Width: | Height: | Size: 30 KiB)
BIN  AudioQnA/assets/img/audio_ui_record.png  Normal file  (After Width: | Height: | Size: 50 KiB)
BIN  AudioQnA/assets/img/audioqna.jpg  Normal file  (After Width: | Height: | Size: 48 KiB)
@@ -1,8 +1,8 @@
# AudioQnA




In this example we will show you how to build an Audio Question and Answering application (AudioQnA). AudioQnA serves like a talking bot, let LLMs talk with users. It basically accepts users' audio inputs, converts to texts and feed to LLMs, gets the text answers and converts back to audio outputs.
In this example we will show you how to build an Audio Question and Answering application (AudioQnA). AudioQnA serves like a talking bot, enabling LLMs to talk with users. It basically accepts users' audio inputs, converts to texts and feed to LLMs, gets the text answers and converts back to audio outputs.

What AudioQnA is delivering and why it stands out:

@@ -156,7 +156,7 @@ cd ../../
```

> [!NOTE]
> If you modified any files and want that change introduced in this step, add `--build` to the end of the command to build the container image instead of pulling it from dockerhub.
> If you have modified any files and want that change to be introduced in this step, add `--build` to the end of the command to build the container image instead of pulling it from dockerhub.

## Ingest data into Redis (Optional)
@@ -8,6 +8,7 @@
import contextlib
import os
import time
import urllib.request

import numpy as np
import torch
@@ -19,7 +20,13 @@ from transformers import WhisperForConditionalGeneration, WhisperProcessor
class AudioSpeechRecognition:
    """Convert audio to text."""

    def __init__(self, model_name_or_path="openai/whisper-small", bf16=False, language=None, device="cpu"):
    def __init__(self, model_name_or_path="openai/whisper-small", bf16=False, language="english", device="cpu"):
        if device == "hpu":
            # Explicitly link HPU with Torch
            from optimum.habana.transformers.modeling_utils import adapt_transformers_to_gaudi

            adapt_transformers_to_gaudi()

        self.device = device
        asr_model_name_or_path = os.environ.get("ASR_MODEL_PATH", model_name_or_path)
        print("Downloading model: {}".format(asr_model_name_or_path))
@@ -33,6 +40,12 @@ class AudioSpeechRecognition:
            self.model = ipex.optimize(self.model, dtype=torch.bfloat16)
        self.language = language

        if device == "hpu":
            # do hpu graph warmup with a long enough input audio
            # whisper has a receptive field of 30 seconds
            # here we select a relatively long audio (~15 sec) to quickly warmup
            self._warmup_whisper_hpu_graph("https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/labixiaoxin.wav")

    def _audiosegment_to_librosawav(self, audiosegment):
        # https://github.com/jiaaro/pydub/blob/master/API.markdown#audiosegmentget_array_of_samples
        # This way is faster than librosa.load or HuggingFace Dataset wrapper
@@ -45,16 +58,27 @@ class AudioSpeechRecognition:

        return fp_arr

    def _warmup_whisper_hpu_graph(self, url):
        print("[ASR] fetch warmup audio...")
        urllib.request.urlretrieve(
            url,
            "warmup.wav",
        )
        print("[ASR] warmup...")
        waveform = AudioSegment.from_file("warmup.wav").set_frame_rate(16000)
        waveform = self._audiosegment_to_librosawav(waveform)
        # pylint: disable=E1101
        inputs = self.processor.feature_extractor(
            waveform, return_tensors="pt", sampling_rate=16_000
        ).input_features.to(self.device)
        _ = self.model.generate(inputs, language="chinese")

    def audio2text(self, audio_path):
        """Convert audio to text.

        audio_path: the path to the input audio, e.g. ~/xxx.mp3
        """
        start = time.time()
        if audio_path.split(".")[-1] in ["flac", "ogg", "aac", "m4a"]:
            audio_path = self._convert_audio_type(audio_path)
        elif audio_path.split(".")[-1] not in ["mp3", "wav"]:
            raise Exception("[ASR ERROR] Audio format not supported!")

        try:
            waveform = AudioSegment.from_file(audio_path).set_frame_rate(16000)
@@ -69,20 +93,10 @@ class AudioSpeechRecognition:
            waveform, return_tensors="pt", sampling_rate=16_000
        ).input_features.to(self.device)
        with torch.cpu.amp.autocast() if self.bf16 else contextlib.nullcontext():
            if self.language is None:
                predicted_ids = self.model.generate(inputs)
            elif self.language == "auto":
                self.model.config.forced_decoder_ids = None
                predicted_ids = self.model.generate(inputs)
            else:
                self.forced_decoder_ids = self.processor.get_decoder_prompt_ids(
                    language=self.language, task="transcribe"
                )
                self.model.config.forced_decoder_ids = self.forced_decoder_ids
                predicted_ids = self.model.generate(inputs)
            predicted_ids = self.model.generate(inputs, language=self.language)
        # pylint: disable=E1101
        result = self.processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[0]
        if self.language == "auto" or self.language == "zh":
        if self.language in ["chinese", "mandarin"]:
            from zhconv import convert

            result = convert(result, "zh-cn")
@@ -91,15 +105,20 @@ class AudioSpeechRecognition:


if __name__ == "__main__":
    asr = AudioSpeechRecognition(language="auto")
    import urllib.request
    asr = AudioSpeechRecognition(language="english")

    # Test multilanguage asr
    urllib.request.urlretrieve(
        "https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/labixiaoxin.wav",
        "sample.wav",
    )
    asr.language = "chinese"
    text = asr.audio2text("sample.wav")

    urllib.request.urlretrieve(
        "https://github.com/intel/intel-extension-for-transformers/raw/main/intel_extension_for_transformers/neural_chat/assets/audio/sample.wav",
        "sample.wav",
    )
    text = asr.audio2text("sample.wav")
    import os

    os.remove("sample.wav")
    print(text)
@@ -58,7 +58,7 @@ if __name__ == "__main__":
    parser.add_argument("--port", type=int, default=8008)
    parser.add_argument("--model_name_or_path", type=str, default="openai/whisper-tiny")
    parser.add_argument("--bf16", default=False, action="store_true")
    parser.add_argument("--language", type=str, default="auto")
    parser.add_argument("--language", type=str, default="english")
    parser.add_argument("--device", type=str, default="cpu")

    args = parser.parse_args()
@@ -311,7 +311,7 @@ def read_clean_buffer(audio_bytes):

def cut_text(text, punc):
    text = re.escape(text)
    punc_list = [p for p in punc if p in {",", ".", ";", "?", "!", "、", ",", "。", "?", "!", ";", ":", "…"}]
    punc_list = [",", ".", ";", "?", "!", "、", ",", "。", "?", "!", ";", ":", "…"]
    if len(punc_list) > 0:
        punds = r"[" + "".join(punc_list) + r"]"
        text = text.strip("\n")
32  AudioQnA/docker/Dockerfile  Normal file
@@ -0,0 +1,32 @@

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

FROM python:3.11-slim

RUN apt-get update -y && apt-get install -y --no-install-recommends --fix-missing \
    libgl1-mesa-glx \
    libjemalloc-dev \
    vim \
    git

RUN useradd -m -s /bin/bash user && \
    mkdir -p /home/user && \
    chown -R user /home/user/

RUN cd /home/user/ && \
    git clone https://github.com/opea-project/GenAIComps.git

RUN cd /home/user/GenAIComps && pip install --no-cache-dir --upgrade pip && \
    pip install -r /home/user/GenAIComps/requirements.txt

COPY ./audioqna.py /home/user/audioqna.py

ENV PYTHONPATH=$PYTHONPATH:/home/user/GenAIComps

USER user

WORKDIR /home/user

ENTRYPOINT ["python", "audioqna.py"]
58  AudioQnA/docker/audioqna.py  Normal file
@@ -0,0 +1,58 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import asyncio
import os

from comps import AudioQnAGateway, MicroService, ServiceOrchestrator, ServiceType

MEGA_SERVICE_HOST_IP = os.getenv("MEGA_SERVICE_HOST_IP", "0.0.0.0")
MEGA_SERVICE_PORT = int(os.getenv("MEGA_SERVICE_PORT", 8888))
ASR_SERVICE_HOST_IP = os.getenv("ASR_SERVICE_HOST_IP", "0.0.0.0")
ASR_SERVICE_PORT = int(os.getenv("ASR_SERVICE_PORT", 9099))
LLM_SERVICE_HOST_IP = os.getenv("LLM_SERVICE_HOST_IP", "0.0.0.0")
LLM_SERVICE_PORT = int(os.getenv("LLM_SERVICE_PORT", 9000))
TTS_SERVICE_HOST_IP = os.getenv("TTS_SERVICE_HOST_IP", "0.0.0.0")
TTS_SERVICE_PORT = int(os.getenv("TTS_SERVICE_PORT", 9088))


class AudioQnAService:
    def __init__(self, host="0.0.0.0", port=8000):
        self.host = host
        self.port = port
        self.megaservice = ServiceOrchestrator()

    def add_remote_service(self):
        asr = MicroService(
            name="asr",
            host=ASR_SERVICE_HOST_IP,
            port=ASR_SERVICE_PORT,
            endpoint="/v1/audio/transcriptions",
            use_remote_service=True,
            service_type=ServiceType.ASR,
        )
        llm = MicroService(
            name="llm",
            host=LLM_SERVICE_HOST_IP,
            port=LLM_SERVICE_PORT,
            endpoint="/v1/chat/completions",
            use_remote_service=True,
            service_type=ServiceType.LLM,
        )
        tts = MicroService(
            name="tts",
            host=TTS_SERVICE_HOST_IP,
            port=TTS_SERVICE_PORT,
            endpoint="/v1/audio/speech",
            use_remote_service=True,
            service_type=ServiceType.TTS,
        )
        self.megaservice.add(asr).add(llm).add(tts)
        self.megaservice.flow_to(asr, llm)
        self.megaservice.flow_to(llm, tts)
        self.gateway = AudioQnAGateway(megaservice=self.megaservice, host="0.0.0.0", port=self.port)


if __name__ == "__main__":
    audioqna = AudioQnAService(host=MEGA_SERVICE_HOST_IP, port=MEGA_SERVICE_PORT)
    audioqna.add_remote_service()
135  AudioQnA/docker/gaudi/README.md  Normal file
@@ -0,0 +1,135 @@
# Build Mega Service of AudioQnA on Gaudi

This document outlines the deployment process for a AudioQnA application utilizing the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline on Intel Gaudi server.

## 🚀 Build Docker images

### 1. Source Code install GenAIComps

```bash
git clone https://github.com/opea-project/GenAIComps.git
cd GenAIComps
```

### 2. Build ASR Image

```bash
docker build -t opea/whisper:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/whisper/Dockerfile_hpu .


docker build -t opea/asr:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/Dockerfile .
```

### 3. Build LLM Image

```bash
docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
```

### 4. Build TTS Image

```bash
docker build -t opea/speecht5:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/speecht5/Dockerfile_hpu .

docker build -t opea/tts:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/Dockerfile .
```

### 6. Build MegaService Docker Image

To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `audioqna.py` Python script. Build the MegaService Docker image using the command below:

```bash
git clone https://github.com/opea-project/GenAIExamples.git
cd GenAIExamples/AudioQnA/docker
docker build --no-cache -t opea/audioqna:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
```

Then run the command `docker images`, you will have following images ready:

1. `opea/whisper:latest`
2. `opea/asr:latest`
3. `opea/llm-tgi:latest`
4. `opea/speecht5:latest`
5. `opea/tts:latest`
6. `opea/audioqna:latest`

## 🚀 Set the environment variables

Before starting the services with `docker compose`, you have to recheck the following environment variables.

```bash
export host_ip=<your External Public IP> # export host_ip=$(hostname -I | awk '{print $1}')
export HUGGINGFACEHUB_API_TOKEN=<your HF token>

export TGI_LLM_ENDPOINT=http://$host_ip:3006
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3

export ASR_ENDPOINT=http://$host_ip:7066
export TTS_ENDPOINT=http://$host_ip:7055

export MEGA_SERVICE_HOST_IP=${host_ip}
export ASR_SERVICE_HOST_IP=${host_ip}
export TTS_SERVICE_HOST_IP=${host_ip}
export LLM_SERVICE_HOST_IP=${host_ip}

export ASR_SERVICE_PORT=3001
export TTS_SERVICE_PORT=3002
export LLM_SERVICE_PORT=3007
```

## 🚀 Start the MegaService

```bash
cd GenAIExamples/AudioQnA/docker/gaudi/
docker compose -f docker_compose.yaml up -d
```

## 🚀 Test MicroServices

```bash
# whisper service
curl http://${host_ip}:7066/v1/asr \
  -X POST \
  -d '{"audio": "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA"}' \
  -H 'Content-Type: application/json'

# asr microservice
curl http://${host_ip}:3001/v1/audio/transcriptions \
  -X POST \
  -d '{"byte_str": "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA"}' \
  -H 'Content-Type: application/json'

# tgi service
curl http://${host_ip}:3006/generate \
  -X POST \
  -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' \
  -H 'Content-Type: application/json'

# llm microservice
curl http://${host_ip}:3007/v1/chat/completions\
  -X POST \
  -d '{"query":"What is Deep Learning?","max_new_tokens":17,"top_k":10,"top_p":0.95,"typical_p":0.95,"temperature":0.01,"repetition_penalty":1.03,"streaming":false}' \
  -H 'Content-Type: application/json'

# speecht5 service
curl http://${host_ip}:7055/v1/tts \
  -X POST \
  -d '{"text": "Who are you?"}' \
  -H 'Content-Type: application/json'

# tts microservice
curl http://${host_ip}:3002/v1/audio/speech \
  -X POST \
  -d '{"text": "Who are you?"}' \
  -H 'Content-Type: application/json'

```

## 🚀 Test MegaService

```bash
curl http://${host_ip}:3008/v1/audioqna \
  -X POST \
  -d '{"audio": "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA", "max_tokens":64}' \
  -H 'Content-Type: application/json'
```
117  AudioQnA/docker/gaudi/docker_compose.yaml  Normal file
@@ -0,0 +1,117 @@

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

version: "3.8"

services:
  whisper-service:
    image: opea/whisper:latest
    container_name: whisper-service
    ports:
      - "7066:7066"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HABANA_VISIBLE_DEVICES: all
      OMPI_MCA_btl_vader_single_copy_mechanism: none
    runtime: habana
    cap_add:
      - SYS_NICE
    restart: unless-stopped
  asr:
    image: opea/asr:latest
    container_name: asr-service
    ports:
      - "3001:9099"
    ipc: host
    environment:
      ASR_ENDPOINT: ${ASR_ENDPOINT}
  speecht5-service:
    image: opea/speecht5:latest
    container_name: speecht5-service
    ports:
      - "7055:7055"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HABANA_VISIBLE_DEVICES: all
      OMPI_MCA_btl_vader_single_copy_mechanism: none
    runtime: habana
    cap_add:
      - SYS_NICE
    restart: unless-stopped
  tts:
    image: opea/tts:latest
    container_name: tts-service
    ports:
      - "3002:9088"
    ipc: host
    environment:
      TTS_ENDPOINT: ${TTS_ENDPOINT}
  tgi-service:
    image: ghcr.io/huggingface/tgi-gaudi:1.2.1
    container_name: tgi-gaudi-server
    ports:
      - "3006:80"
    volumes:
      - "./data:/data"
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      HF_HUB_DISABLE_PROGRESS_BARS: 1
      HF_HUB_ENABLE_HF_TRANSFER: 0
      HABANA_VISIBLE_DEVICES: all
      OMPI_MCA_btl_vader_single_copy_mechanism: none
    runtime: habana
    cap_add:
      - SYS_NICE
    ipc: host
    command: --model-id ${LLM_MODEL_ID}
  llm:
    image: opea/llm-tgi:latest
    container_name: llm-tgi-gaudi-server
    depends_on:
      - tgi-service
    ports:
      - "3007:9000"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
  audioqna-gaudi-backend-server:
    image: opea/audioqna:latest
    container_name: audioqna-gaudi-backend-server
    depends_on:
      - asr
      - llm
      - tts
    ports:
      - "3008:8888"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
      - ASR_SERVICE_HOST_IP=${ASR_SERVICE_HOST_IP}
      - ASR_SERVICE_PORT=${ASR_SERVICE_PORT}
      - LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP}
      - LLM_SERVICE_PORT=${LLM_SERVICE_PORT}
      - TTS_SERVICE_HOST_IP=${TTS_SERVICE_HOST_IP}
      - TTS_SERVICE_PORT=${TTS_SERVICE_PORT}
    ipc: host
    restart: always

networks:
  default:
    driver: bridge
2  AudioQnA/docker/ui/svelte/.env  Normal file
@@ -0,0 +1,2 @@
CHAT_URL = 'http://backend_address:3008/v1/audioqna'
@@ -2,8 +2,8 @@

### 📸 Project Screenshots





<h2>🧐 Features</h2>
19 binary image files moved or renamed with unchanged dimensions and sizes (Before/After): 762 B, 752 B, 838 B, 1.6 KiB, 7.5 KiB, 1.0 KiB, 1.2 KiB, 5.4 KiB, 5.8 KiB, 5.8 KiB, 4.8 KiB, 4.8 KiB, 5.8 KiB, 4.7 KiB, 429 B, 669 B, 844 B, 16 KiB, 15 KiB.