fix path bug for reorg (#801)
Signed-off-by: Xinyao Wang <xinyao.wang@intel.com>
@@ -16,7 +16,7 @@ function start_agent_and_api_server() {
     docker run -d --runtime=runc --name=kdd-cup-24-crag-service -p=8080:8000 docker.io/aicrowd/kdd-cup-24-crag-mock-api:v0

     echo "Starting Agent services"
-    cd $WORKDIR/GenAIExamples/AgentQnA/docker/openai
+    cd $WORKDIR/GenAIExamples/AgentQnA/docker_compose/intel/cpu/xeon
     bash launch_agent_service_openai.sh
 }
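For context: the hunk above retargets the agent launch step from the pre-reorg `docker/openai` folder to the platform-specific `docker_compose/intel/cpu/xeon` layout. A minimal sketch of a fail-fast guard for this kind of path assumption (the guard itself is hypothetical, not part of the commit):

```bash
# Hypothetical guard: abort early if the reorganized directory is missing,
# instead of letting `cd` fail and the launch script run from the wrong place.
AGENT_DIR="$WORKDIR/GenAIExamples/AgentQnA/docker_compose/intel/cpu/xeon"
if [ ! -d "$AGENT_DIR" ]; then
    echo "Expected agent compose directory not found: $AGENT_DIR" >&2
    exit 1
fi
cd "$AGENT_DIR" && bash launch_agent_service_openai.sh
```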
@@ -45,7 +45,7 @@ function start_services() {
     export TTS_SERVICE_PORT=3002
     export LLM_SERVICE_PORT=3007

-    # sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env
+    # sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

     # Start Docker Containers
     docker compose up -d > ${LOG_PATH}/start_services_with_compose.log
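The changed line is commented out in the script, but its intent is worth spelling out: it rewrites the literal placeholder `backend_address` in the Svelte UI env file to the host's IP. A hedged illustration under the new layout (how `$ip_address` is derived is an assumption here, not shown in the hunk):

```bash
# Illustrative only: substitute the backend placeholder in the UI env file.
ip_address=$(hostname -I | awk '{print $1}')   # assumed derivation
sed -i "s/backend_address/$ip_address/g" "$WORKPATH/ui/svelte/.env"
```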
@@ -91,7 +91,7 @@ function validate_megaservice() {
 }

 #function validate_frontend() {
-# cd $WORKPATH/docker/ui/svelte
+# cd $WORKPATH/ui/svelte
 # local conda_env_name="OPEA_e2e"
 # export PATH=${HOME}/miniforge3/bin/:$PATH
 ## conda remove -n ${conda_env_name} --all -y
@@ -44,7 +44,7 @@ function start_services() {
     export TTS_SERVICE_PORT=3002
     export LLM_SERVICE_PORT=3007

-    # sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env
+    # sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

     # Start Docker Containers
     docker compose up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -81,7 +81,7 @@ function validate_megaservice() {
 }

 #function validate_frontend() {
-# cd $WORKPATH/docker/ui/svelte
+# cd $WORKPATH/ui/svelte
 # local conda_env_name="OPEA_e2e"
 # export PATH=${HOME}/miniforge3/bin/:$PATH
 ## conda remove -n ${conda_env_name} --all -y
@@ -152,7 +152,7 @@ By default, the embedding, reranking and LLM models are set to a default value as listed below:
 | Reranking | BAAI/bge-reranker-base |
 | LLM | Intel/neural-chat-7b-v3-3 |

-Change the `xxx_MODEL_ID` in `docker/xxx/set_env.sh` for your needs.
+Change the `xxx_MODEL_ID` in `docker_compose/xxx/set_env.sh` for your needs.

 For customers with proxy issues, the models from [ModelScope](https://www.modelscope.cn/models) are also supported in ChatQnA. Refer to [this readme](docker_compose/intel/cpu/xeon/README.md) for details.
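Where the README above says to change `xxx_MODEL_ID`, a sketch of what the override looks like before sourcing the environment script. The variable names follow the `xxx_MODEL_ID` pattern, and the embedding default is not shown in this hunk, so both are assumptions:

```bash
# Assumed variable names matching the embedding / reranking / LLM rows above;
# the embedding value is illustrative, the other two come from the table.
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
```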
@@ -107,7 +107,7 @@ To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps)

 ```bash
 git clone https://github.com/opea-project/GenAIExamples.git
-cd GenAIExamples/ChatQnA/docker
+cd GenAIExamples/ChatQnA/
 docker build --no-cache -t opea/chatqna:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
 cd ../../..
 ```
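After the relocated build step above, a quick optional check (not part of the commit) that the image landed:

```bash
# List the freshly built image; an empty result means the build did not produce it.
docker images opea/chatqna --format '{{.Repository}}:{{.Tag}} {{.CreatedSince}}'
```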
@@ -67,7 +67,7 @@ To set up environment variables for deploying ChatQnA services, follow these steps:
 3. Set up other environment variables:

    ```bash
-   source ./docker/set_env.sh
+   source ./docker_compose/set_env.sh
    ```

 ### Deploy CodeGen using Docker
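A note on the `source ./docker_compose/set_env.sh` pattern in this hunk: `source` runs the script in the current shell, so its `export`s survive for the `docker compose` call that follows; running it as `bash set_env.sh` would set them in a child process and lose them. A quick sanity check (what set_env.sh actually exports is an assumption here):

```bash
source ./docker_compose/set_env.sh
env | grep MODEL_ID   # exported values should be visible in the current shell
```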
@@ -30,7 +30,7 @@ By default, the LLM model is set to a default value as listed below:
 | ------- | ----------------------------- |
 | LLM | HuggingFaceH4/mistral-7b-grok |

-Change the `LLM_MODEL_ID` in `docker/set_env.sh` for your needs.
+Change the `LLM_MODEL_ID` in `docker_compose/set_env.sh` for your needs.

 ### Setup Environment Variable
@@ -58,7 +58,7 @@ To set up environment variables for deploying Code Translation services, follow these steps:
 3. Set up other environment variables:

    ```bash
-   source ./docker/set_env.sh
+   source ./docker_compose/set_env.sh
    ```

 ### Deploy with Docker
@@ -92,7 +92,8 @@ Change the `LLM_MODEL_ID` below for your needs.
 3. Set up other environment variables:

    ```bash
-   source ../set_env.sh
+   cd GenAIExamples/CodeTrans/docker_compose
+   source ./set_env.sh
    ```

 ### Start Microservice Docker Containers
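The extra `cd` added in this hunk is not cosmetic: `source` executes in the caller's shell, so relative paths like `../set_env.sh` resolve against whatever directory the reader happens to be in, and the old relative reference presumably stopped resolving after the reorg. Changing into `docker_compose/` first makes `./set_env.sh` self-consistent. A sketch:

```bash
# Relative references to set_env.sh now resolve against docker_compose/.
cd GenAIExamples/CodeTrans/docker_compose
source ./set_env.sh
cd - > /dev/null    # the exported variables persist after changing back
```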
@@ -84,7 +84,8 @@ Change the `LLM_MODEL_ID` below for your needs.
 3. Set up other environment variables:

    ```bash
-   source ../set_env.sh
+   cd GenAIExamples/CodeTrans/docker_compose
+   source ./set_env.sh
    ```

 ### Start Microservice Docker Containers
@@ -25,7 +25,7 @@ Currently we support two ways of deploying Document Summarization services with

 ### Required Models

-We set default model as "Intel/neural-chat-7b-v3-3", change "LLM_MODEL_ID" in "set_env.sh" if you want to use other models.
+We set default model as "Intel/neural-chat-7b-v3-3", change "LLM_MODEL_ID" in "docker_compose/set_env.sh" if you want to use other models.

 ```
 export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
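As the changed line says, swapping models is just a matter of overriding `LLM_MODEL_ID` before launching. A hedged example with a different instruct model; whether a given model fits your hardware and serving stack is outside the scope of this commit:

```bash
# Any compatible Hugging Face model id can be substituted; this one is illustrative.
export LLM_MODEL_ID="mistralai/Mistral-7B-Instruct-v0.2"
```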
@@ -57,7 +57,7 @@ To set up environment variables for deploying Document Summarization services, follow these steps:
 3. Set up other environment variables:

    ```bash
-   source ./docker/set_env.sh
+   source ./docker_compose/set_env.sh
    ```

 ### Deploy using Docker
@@ -60,7 +60,7 @@ To set up environment variables for deploying SearchQnA services, follow these steps:
 3. Set up other environment variables:

    ```bash
-   source ./docker/set_env.sh
+   source ./docker_compose/set_env.sh
    ```

 ### Deploy SearchQnA on Gaudi
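Taken together, the hunks follow one rule: paths that used to live under `docker/` now live under `docker_compose/`, with platform-specific subfolders such as `intel/cpu/xeon` where applicable. A generic, illustrative form of the updated setup step, where `<Example>` is a placeholder rather than a real path:

```bash
cd GenAIExamples/<Example>/docker_compose   # e.g. SearchQnA, DocSum, CodeTrans
source ./set_env.sh
docker compose up -d
```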