fix path bug for reorg (#801)

Signed-off-by: Xinyao Wang <xinyao.wang@intel.com>
Author: XinyaoWa
Date: 2024-09-12 17:52:06 +08:00
Committed by: GitHub
Parent: d42292967c
Commit: 264759d85a
15 changed files with 17 additions and 15 deletions

@@ -16,7 +16,7 @@ function start_agent_and_api_server() {
docker run -d --runtime=runc --name=kdd-cup-24-crag-service -p=8080:8000 docker.io/aicrowd/kdd-cup-24-crag-mock-api:v0
echo "Starting Agent services"
-cd $WORKDIR/GenAIExamples/AgentQnA/docker/openai
+cd $WORKDIR/GenAIExamples/AgentQnA/docker_compose/intel/cpu/xeon
bash launch_agent_service_openai.sh
}
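
A quick sanity check after this step might look like the sketch below. The container name and the 8080:8000 port mapping come from the `docker run` line above; the exact HTTP path the mock API serves is an assumption.

```bash
# Confirm the CRAG mock API container is up and its mapped port answers.
docker ps --filter name=kdd-cup-24-crag-service --format '{{.Names}}: {{.Status}}'
# Root path is an assumption; adjust to whatever the mock API exposes.
curl -sf http://localhost:8080/ || echo "mock API not reachable yet"
```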

@@ -45,7 +45,7 @@ function start_services() {
export TTS_SERVICE_PORT=3002
export LLM_SERVICE_PORT=3007
-# sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env
+# sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
# Start Docker Containers
docker compose up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -91,7 +91,7 @@ function validate_megaservice() {
}
#function validate_frontend() {
-# cd $WORKPATH/docker/ui/svelte
+# cd $WORKPATH/ui/svelte
# local conda_env_name="OPEA_e2e"
# export PATH=${HOME}/miniforge3/bin/:$PATH
## conda remove -n ${conda_env_name} --all -y
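
The commented-out `sed` line above substitutes a placeholder backend address into the UI's `.env`. A minimal sketch of that substitution with the new path, assuming `.env` contains a literal `backend_address` token; how the script derives `ip_address` is an assumption:

```bash
# Replace the backend_address placeholder in the UI .env with this
# host's IP. The hostname -I form is an assumption.
ip_address=$(hostname -I | awk '{print $1}')
sed -i "s/backend_address/$ip_address/g" "$WORKPATH/ui/svelte/.env"
```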

@@ -44,7 +44,7 @@ function start_services() {
export TTS_SERVICE_PORT=3002
export LLM_SERVICE_PORT=3007
-# sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env
+# sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
# Start Docker Containers
docker compose up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -81,7 +81,7 @@ function validate_megaservice() {
}
#function validate_frontend() {
-# cd $WORKPATH/docker/ui/svelte
+# cd $WORKPATH/ui/svelte
# local conda_env_name="OPEA_e2e"
# export PATH=${HOME}/miniforge3/bin/:$PATH
## conda remove -n ${conda_env_name} --all -y

@@ -152,7 +152,7 @@ By default, the embedding, reranking and LLM models are set to a default value a
| Reranking | BAAI/bge-reranker-base |
| LLM | Intel/neural-chat-7b-v3-3 |
-Change the `xxx_MODEL_ID` in `docker/xxx/set_env.sh` for your needs.
+Change the `xxx_MODEL_ID` in `docker_compose/xxx/set_env.sh` for your needs.
For customers with proxy issues, the models from [ModelScope](https://www.modelscope.cn/models) are also supported in ChatQnA. Refer to [this readme](docker_compose/intel/cpu/xeon/README.md) for details.
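
A minimal override sketch for the defaults in the table above. The variable names follow the `xxx_MODEL_ID` pattern the README names but are assumptions, as is picking the Xeon instance of the `docker_compose/xxx` path:

```bash
# Hypothetical overrides; set these before sourcing the relocated script.
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
# Hardware-specific subdirectory is one instance of docker_compose/xxx.
source ./docker_compose/intel/cpu/xeon/set_env.sh
```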

@@ -107,7 +107,7 @@ To construct the Mega Service, we utilize the [GenAIComps](https://github.com/op
```bash
git clone https://github.com/opea-project/GenAIExamples.git
-cd GenAIExamples/ChatQnA/docker
+cd GenAIExamples/ChatQnA/
docker build --no-cache -t opea/chatqna:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
cd ../../..
```
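
An optional follow-up check to confirm the build from the corrected directory produced the expected image:

```bash
# List the freshly built image; an empty result means the build failed
# or was tagged differently.
docker images opea/chatqna:latest
```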

@@ -67,7 +67,7 @@ To set up environment variables for deploying ChatQnA services, follow these ste
3. Set up other environment variables:
```bash
-source ./docker/set_env.sh
+source ./docker_compose/set_env.sh
```
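
After sourcing the relocated script, one way to spot-check that it exported what the later compose step expects; the name patterns grepped for are assumptions based on the ports exported elsewhere in this commit:

```bash
source ./docker_compose/set_env.sh
# MODEL_ID / SERVICE_PORT naming is an assumption.
env | grep -E 'MODEL_ID|SERVICE_PORT' || echo "nothing exported - check the path"
```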
### Deploy CodeGen using Docker

@@ -30,7 +30,7 @@ By default, the LLM model is set to a default value as listed below:
| ------- | ----------------------------- |
| LLM | HuggingFaceH4/mistral-7b-grok |
-Change the `LLM_MODEL_ID` in `docker/set_env.sh` for your needs.
+Change the `LLM_MODEL_ID` in `docker_compose/set_env.sh` for your needs.
### Setup Environment Variable
@@ -58,7 +58,7 @@ To set up environment variables for deploying Code Translation services, follow
3. Set up other environment variables:
```bash
-source ./docker/set_env.sh
+source ./docker_compose/set_env.sh
```
### Deploy with Docker

@@ -92,7 +92,8 @@ Change the `LLM_MODEL_ID` below for your needs.
3. Set up other environment variables:
```bash
-source ../set_env.sh
+cd GenAIExamples/CodeTrans/docker_compose
+source ./set_env.sh
```
### Start Microservice Docker Containers
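
A hedged sketch of what this section likely runs after the environment setup above; the hardware-specific subdirectory mirrors the `intel/cpu/xeon` layout seen earlier in this commit and is an assumption, as is relying on the default compose file name:

```bash
# Subdirectory is an assumption inferred from this commit's reorg layout.
cd GenAIExamples/CodeTrans/docker_compose/intel/cpu/xeon
docker compose up -d
```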

@@ -84,7 +84,8 @@ Change the `LLM_MODEL_ID` below for your needs.
3. Set up other environment variables:
```bash
-source ../set_env.sh
+cd GenAIExamples/CodeTrans/docker_compose
+source ./set_env.sh
```
### Start Microservice Docker Containers

@@ -25,7 +25,7 @@ Currently we support two ways of deploying Document Summarization services with
### Required Models
-We set default model as "Intel/neural-chat-7b-v3-3", change "LLM_MODEL_ID" in "set_env.sh" if you want to use other models.
+We set default model as "Intel/neural-chat-7b-v3-3", change "LLM_MODEL_ID" in "docker_compose/set_env.sh" if you want to use other models.
```
export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
@@ -57,7 +57,7 @@ To set up environment variables for deploying Document Summarization services, f
3. Set up other environment variables:
```bash
-source ./docker/set_env.sh
+source ./docker_compose/set_env.sh
```
### Deploy using Docker

@@ -60,7 +60,7 @@ To set up environment variables for deploying SearchQnA services, follow these s
3. Set up other environment variables:
```bash
-source ./docker/set_env.sh
+source ./docker_compose/set_env.sh
```
### Deploy SearchQnA on Gaudi
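
A minimal sketch for this heading, assuming SearchQnA's Gaudi files follow the same reorg layout; `intel/hpu/gaudi` is an assumption inferred from the `intel/cpu/xeon` path earlier in this commit:

```bash
# Path is an assumption mirroring the Xeon layout above.
cd GenAIExamples/SearchQnA/docker_compose/intel/hpu/gaudi
docker compose up -d
```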