Add README.md for UT scripts.
Signed-off-by: ZePan110 <ze.pan@intel.com>
@@ -7,7 +7,7 @@ source .set_env.sh
popd > /dev/null

export host_ip=$(hostname -I | awk '{print $1}')

export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export MM_EMBEDDING_SERVICE_HOST_IP=${host_ip}
export MM_RETRIEVER_SERVICE_HOST_IP=${host_ip}
export LVM_SERVICE_HOST_IP=${host_ip}
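The `host_ip` export above takes the first address reported by `hostname -I`. On a multi-homed machine it can be worth confirming that this resolves to the interface the services should use (an illustrative check, not part of the change):

```bash
# Illustrative only: print the address the scripts will use as host_ip.
hostname -I | awk '{print $1}'
```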
MultimodalQnA/tests/README.md (new file, 45 lines)
@@ -0,0 +1,45 @@
# MultimodalQnA E2E test scripts

## Set the required environment variable

```bash
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
```
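Before launching any of the scripts below, it can help to confirm the token is actually exported in the current shell (this check is illustrative and not part of the test scripts):

```bash
# Illustrative only: warn if the Hugging Face token is missing from the environment.
if [ -z "${HUGGINGFACEHUB_API_TOKEN}" ]; then
  echo "HUGGINGFACEHUB_API_TOKEN is not set; gated model downloads will fail." >&2
fi
```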

## Run test

On Intel Xeon with vLLM:

```bash
bash test_compose_on_xeon.sh
```

On Intel Xeon with TGI:

```bash
bash test_compose_tgi_on_xeon.sh
```

On Intel Gaudi with vLLM:

```bash
bash test_compose_on_gaudi.sh
```

On Intel Gaudi with TGI:

```bash
bash test_compose_tgi_on_gaudi.sh
```

On AMD ROCm with TGI:

```bash
bash test_compose_on_rocm.sh
```

On AMD ROCm with vLLM:

```bash
bash test_compose_vllm_on_rocm.sh
```
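Each `test_compose_*.sh` script exercises the corresponding Docker Compose deployment end to end. To keep the console output for later debugging, one option (not required by the scripts) is to tee it to a log file, for example:

```bash
# Example only: capture stdout and stderr of a test run in a log file.
bash test_compose_on_xeon.sh 2>&1 | tee test_compose_on_xeon.log
```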
@@ -65,37 +65,12 @@ function build_docker_images() {
}

function setup_env() {
    export host_ip=${ip_address}
    export MM_EMBEDDING_SERVICE_HOST_IP=${host_ip}
    export MM_RETRIEVER_SERVICE_HOST_IP=${host_ip}
    export LVM_SERVICE_HOST_IP=${host_ip}
    export MEGA_SERVICE_HOST_IP=${host_ip}
    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
    export WHISPER_PORT=7066
    export MAX_IMAGES=1
    export WHISPER_MODEL="base"
    export WHISPER_SERVER_ENDPOINT="http://${host_ip}:${WHISPER_PORT}/v1/asr"
    export COLLECTION_NAME="LangChainCollection"
    export MILVUS_HOST=${host_ip}
    export DATAPREP_MMR_PORT=6007
    export DATAPREP_INGEST_SERVICE_ENDPOINT="http://${host_ip}:${DATAPREP_MMR_PORT}/v1/dataprep/ingest"
    export DATAPREP_GEN_TRANSCRIPT_SERVICE_ENDPOINT="http://${host_ip}:${DATAPREP_MMR_PORT}/v1/dataprep/generate_transcripts"
    export DATAPREP_GEN_CAPTION_SERVICE_ENDPOINT="http://${host_ip}:${DATAPREP_MMR_PORT}/v1/dataprep/generate_captions"
    export DATAPREP_GET_FILE_ENDPOINT="http://${host_ip}:${DATAPREP_MMR_PORT}/v1/dataprep/get"
    export DATAPREP_DELETE_FILE_ENDPOINT="http://${host_ip}:${DATAPREP_MMR_PORT}/v1/dataprep/delete"
    export EMM_BRIDGETOWER_PORT=6006
    export BRIDGE_TOWER_EMBEDDING=true
    export EMBEDDING_MODEL_ID="BridgeTower/bridgetower-large-itm-mlm-itc"
    export MMEI_EMBEDDING_ENDPOINT="http://${host_ip}:$EMM_BRIDGETOWER_PORT"
    export MM_EMBEDDING_PORT_MICROSERVICE=6000
    export MILVUS_RETRIEVER_PORT=7000
    export LVM_PORT=9399
    export LLAVA_SERVER_PORT=8399
    export LVM_MODEL_ID="llava-hf/llava-1.5-7b-hf"
    export LVM_ENDPOINT="http://${host_ip}:$LLAVA_SERVER_PORT"
    export MEGA_SERVICE_PORT=8888
    export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:$MEGA_SERVICE_PORT/v1/multimodalqna"
    export UI_PORT=5173
    cd $WORKPATH/docker_compose/intel
    source set_env.sh
}
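For orientation, a rough sketch of where `setup_env` sits in the overall test flow; apart from `build_docker_images` and `setup_env`, the function names below are placeholders rather than names taken from the actual script:

```bash
# Hypothetical top-level flow of a test_compose_*.sh script (sketch only).
build_docker_images      # build the service images (defined earlier in the script)
setup_env                # export host_ip and service endpoints, then source set_env.sh
# start_services         # placeholder: bring up the Docker Compose stack
# validate_microservices # placeholder: probe the service endpoints
# stop_services          # placeholder: tear the stack down
```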