Refactor Vectorstores to E-RAG Style (#1159)

Move DBs with yaml/Dockerfile into third_parties folder and delete vectorstores folder.
Modify related components for change of vectordb folder path.
Remaining vectorstores:
- third_parties
    - elasticsearch
    - milvus
    - opensearch
    - pathway
    - pgvector
    - redis
    - vdms

Fix issue https://github.com/opea-project/GenAIComps/issues/1008.

Signed-off-by: letonghan <letong.han@intel.com>
This commit is contained in:
Letong Han
2025-01-16 19:46:11 +08:00
committed by GitHub
parent d6312a73b9
commit 8e48849c49
60 changed files with 50 additions and 298 deletions

1
.github/CODEOWNERS vendored
View File

@@ -26,5 +26,4 @@
/comps/text2image/ xinyu.ye@intel.com liang1.lv@intel.com
/comps/text2sql/ yogesh.pandey@intel.com qing.yao@intel.com
/comps/tts/ sihan.chen@intel.com letong.han@intel.com
/comps/vectorstores/ xinyu.ye@intel.com letong.han@intel.com
/comps/web_retrievers/ sihan.chen@intel.com liang1.lv@intel.com

View File

@@ -18,6 +18,10 @@ services:
build:
dockerfile: comps/third_parties/bridgetower/src/Dockerfile.intel_hpu
image: ${REGISTRY:-opea}/embedding-multimodal-bridgetower-gaudi:${TAG:-latest}
pathway:
build:
dockerfile: comps/third_parties/pathway/src/Dockerfile
image: ${REGISTRY:-opea}/pathway:${TAG:-latest}
wav2lip:
build:
dockerfile: comps/third_parties/wav2lip/src/Dockerfile

View File

@@ -1,9 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# this file should be run in the root of the repo
services:
vectorstore-pathway:
build:
dockerfile: comps/vectorstores/pathway/Dockerfile
image: ${REGISTRY:-opea}/vectorstore-pathway:${TAG:-latest}

View File

@@ -7,7 +7,7 @@ on:
inputs:
services:
default: "asr"
description: "List of services to test [agent,asr,chathistory,dataprep,embeddings,feedback_management,finetuning,guardrails,intent_detection,knowledgegraphs,llms,lvms,nginx,prompt_registry,ragas,rerankings,retrievers,tts,vectorstores,web_retrievers]"
description: "List of services to test [agent,asr,chathistory,dataprep,embeddings,feedback_management,finetuning,guardrails,intent_detection,knowledgegraphs,llms,lvms,nginx,prompt_registry,ragas,rerankings,retrievers,tts,web_retrievers]"
required: true
type: string
build:

View File

@@ -7,7 +7,7 @@ on:
inputs:
services:
default: ""
description: "List of services to test [agent,asr,chathistory,dataprep,embeddings,feedback_management,finetuning,guardrails,intent_detection,knowledgegraphs,llms,lvms,nginx,prompt_registry,ragas,rerankings,retrievers,tts,vectorstores,web_retrievers]"
description: "List of services to test [agent,asr,chathistory,dataprep,embeddings,feedback_management,finetuning,guardrails,intent_detection,knowledgegraphs,llms,lvms,nginx,prompt_registry,ragas,rerankings,retrievers,tts,web_retrievers]"
required: false
type: string
images:

View File

@@ -7,7 +7,7 @@ on:
inputs:
services:
default: "asr"
description: "List of services to test [agent_langchain,asr,chathistory_mongo,dataprep_milvus...]" #,embeddings,guardrails,llms,lvms,prompt_registry,ragas,rerankings,retrievers,tts,vectorstores,web_retrievers]"
description: "List of services to test [agent_langchain,asr,chathistory_mongo,dataprep_milvus...]" #,embeddings,guardrails,llms,lvms,prompt_registry,ragas,rerankings,retrievers,tts,web_retrievers]"
required: false
type: string
images:

View File

@@ -17,7 +17,7 @@ export INDEX_NAME=${your_index_name}
### 1.3 Start Elasticsearch
Please refer to this [readme](../../../vectorstores/elasticsearch/README.md).
Please refer to this [readme](../../../third_parties/elasticsearch/src/README.md).
### 1.4 Start Document Preparation Microservice for Elasticsearch with Python Script
@@ -31,7 +31,7 @@ python prepare_doc_elastic.py
### 2.1 Start Elasticsearch
Please refer to this [readme](../../../vectorstores/elasticsearch/README.md).
Please refer to this [readme](../../../third_parties/elasticsearch/src/README.md).
### 2.2 Setup Environment Variables

View File

@@ -13,7 +13,7 @@ apt-get install poppler-utils -y
### 1.2 Start Milvus Server
Please refer to this [readme](../../../vectorstores/milvus/README.md).
Please refer to this [readme](../../../third_parties/milvus/src/README.md).
### 1.3 Setup Environment Variables
@@ -56,7 +56,7 @@ python prepare_doc_milvus.py
### 2.1 Start Milvus Server
Please refer to this [readme](../../../vectorstores/milvus/README.md).
Please refer to this [readme](../../../third_parties/milvus/src/README.md).
### 2.2 Build Docker Image

View File

@@ -23,7 +23,7 @@ pip install -r requirements.txt
### 1.2 Start Redis Stack Server
Please refer to this [readme](../../../../vectorstores/redis/README.md).
Please refer to this [readme](../../../../third_parties/redis/src/README.md).
### 1.3 Setup Environment Variables
@@ -58,7 +58,7 @@ python prepare_videodoc_redis.py
### 2.1 Start Redis Stack Server
Please refer to this [readme](../../../../vectorstores/redis/README.md).
Please refer to this [readme](../../../../third_parties/redis/src/README.md).
### 2.2 Start LVM Microservice (Optional)

View File

@@ -18,7 +18,7 @@ pip install -r requirements.txt
### 1.2 Start OpenSearch Stack Server
Please refer to this [readme](../../vectorstores/opensearch/README.md).
Please refer to this [readme](../../third_parties/opensearch/src/README.md).
### 1.3 Setup Environment Variables
@@ -69,7 +69,7 @@ python prepare_doc_opensearch.py
### 2.1 Start OpenSearch Stack Server
Please refer to this [readme](../../vectorstores/opensearch/README.md).
Please refer to this [readme](../../third_parties/opensearch/src/README.md).
### 2.2 Setup Environment Variables

View File

@@ -17,7 +17,7 @@ export INDEX_NAME=${your_index_name}
### 1.3 Start PGVector
Please refer to this [readme](../../../vectorstores/pgvector/README.md).
Please refer to this [readme](../../../third_parties/pgvector/src/README.md).
### 1.4 Start Document Preparation Microservice for PGVector with Python Script
@@ -31,7 +31,7 @@ python prepare_doc_pgvector.py
### 2.1 Start PGVector
Please refer to this [readme](../../../vectorstores/pgvector/README.md).
Please refer to this [readme](../../../third_parties/pgvector/src/README.md).
### 2.2 Setup Environment Variables

View File

@@ -10,7 +10,24 @@ pip install -r requirements.txt
### Start Pinecone Server
Please refer to this [readme](../../../vectorstores/pinecone/README.md).
1. Create Pinecone account from the below link
https://app.pinecone.io/
More details from Pinecone quick start guide https://docs.pinecone.io/guides/get-started/quickstart
2. Get API key
API Key is needed to make the API calls. API key can get it from the Project -> Manage -> API keys
3. Create the index in https://app.pinecone.io/
Following details are to be provided
- Index name
- Based on the embedding model selected, following has to be provided
a. Dimensions
b. Metric
### Setup Environment Variables

View File

@@ -13,7 +13,7 @@ apt-get install poppler-utils -y
### Start Qdrant Server
Please refer to this [readme](../../../vectorstores/qdrant/README.md).
docker run -p 6333:6333 -p 6334:6334 -v ./qdrant_storage:/qdrant/storage:z qdrant/qdrant
### Setup Environment Variables

View File

@@ -33,7 +33,7 @@ cd langchain_ray; pip install -r requirements_ray.txt
### 1.2 Start Redis Stack Server
Please refer to this [readme](../../vectorstores/redis/README.md).
Please refer to this [readme](../../third_parties/redis/src/README.md).
### 1.3 Setup Environment Variables
@@ -90,7 +90,7 @@ python prepare_doc_redis_on_ray.py
### 2.1 Start Redis Stack Server
Please refer to this [readme](../../vectorstores/redis/README.md).
Please refer to this [readme](../../third_parties/redis/src/README.md).
### 2.2 Setup Environment Variables

View File

@@ -27,7 +27,7 @@ cd langchain_ray; pip install -r requirements_ray.txt
### 1.2 Start VDMS Server
Refer to this [readme](../../vectorstores/vdms/README.md).
Refer to this [readme](../../third_parties/vdms/src/README.md).
### 1.3 Setup Environment Variables
@@ -60,7 +60,7 @@ python prepare_doc_redis_on_ray.py
### 2.1 Start VDMS Server
Refer to this [readme](../../vectorstores/vdms/README.md).
Refer to this [readme](../../third_parties/vdms/src/README.md).
### 2.2 Setup Environment Variables

View File

@@ -15,12 +15,12 @@ RUN apt-get update && apt-get install -y \
WORKDIR /app
COPY comps/vectorstores/pathway/requirements.txt /app/
COPY comps/third_parties/pathway/src/requirements.txt /app/
RUN if [ ${ARCH} = "cpu" ]; then pip install --no-cache-dir torch torchvision --index-url https://download.pytorch.org/whl/cpu; fi && \
pip install --no-cache-dir -r requirements.txt
COPY comps/vectorstores/pathway/vectorstore_pathway.py /app/
COPY comps/third_parties/pathway/src/vectorstore_pathway.py /app/
CMD ["python", "vectorstore_pathway.py"]

View File

@@ -56,7 +56,7 @@ For more information, see the relevant Pathway docs:
Build the Docker and run the Pathway Vector Store:
```bash
docker build --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -t opea/vectorstore-pathway:latest -f comps/vectorstores/src/pathway/Dockerfile .
docker build --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -t opea/vectorstore-pathway:latest -f comps/third_parties/pathway/src/Dockerfile .
# with locally loaded model, you may add `EMBED_MODEL` env variable to configure the model.
docker run -e PATHWAY_HOST=${PATHWAY_HOST} -e PATHWAY_PORT=${PATHWAY_PORT} -e http_proxy=$http_proxy -e https_proxy=$https_proxy -v ./data:/app/data -p ${PATHWAY_PORT}:${PATHWAY_PORT} opea/vectorstore-pathway:latest

View File

@@ -1,39 +0,0 @@
# Vectorstores Microservice
The Vectorstores Microservice provides convenient way to start various vector database servers.
## Vectorstores Microservice with Redis
For details, please refer to this [readme](redis/README.md)
## Vectorstores Microservice with Qdrant
For details, please refer to this [readme](qdrant/README.md)
## Vectorstores Microservice with PGVector
For details, please refer to this [readme](pgvector/README.md)
## Vectorstores Microservice with Pinecone
For details, please refer to this [readme](pinecone/README.md)
## Vectorstores Microservice with Pathway
For details, please refer to this [readme](pathway/README.md)
## Vectorstores Microservice with Milvus
For details, please refer to this [readme](milvus/README.md)
## Vectorstores Microservice with LanceDB
For details, please refer to this [readme](lancedb/README.md)
## Vectorstores Microservice with Chroma
For details, please refer to this [readme](chroma/README.md)
## Vectorstores Microservice with VDMS
For details, please refer to this [readme](vdms/README.md)

View File

@@ -1,38 +0,0 @@
# Start Chroma server
## Introduction
Chroma is a AI-native open-source vector database focused on developer productivity and happiness. Chroma is licensed under Apache 2.0. Chroma runs in various modes, we can deploy it as a server running your local machine or in the cloud.
## Getting Started
### Start Chroma Server
To start the Chroma server on your local machine, follow these steps:
```bash
git clone https://github.com/chroma-core/chroma.git
cd chroma
docker compose up -d
```
### Start Log Output
Upon starting the server, you should see log outputs similar to the following:
```
server-1 | Starting 'uvicorn chromadb.app:app' with args: --workers 1 --host 0.0.0.0 --port 8000 --proxy-headers --log-config chromadb/log_config.yml --timeout-keep-alive 30
server-1 | INFO: [02-08-2024 07:03:19] Set chroma_server_nofile to 65536
server-1 | INFO: [02-08-2024 07:03:19] Anonymized telemetry enabled. See https://docs.trychroma.com/telemetry for more information.
server-1 | DEBUG: [02-08-2024 07:03:19] Starting component System
server-1 | DEBUG: [02-08-2024 07:03:19] Starting component OpenTelemetryClient
server-1 | DEBUG: [02-08-2024 07:03:19] Starting component SqliteDB
server-1 | DEBUG: [02-08-2024 07:03:19] Starting component QuotaEnforcer
server-1 | DEBUG: [02-08-2024 07:03:19] Starting component Posthog
server-1 | DEBUG: [02-08-2024 07:03:19] Starting component LocalSegmentManager
server-1 | DEBUG: [02-08-2024 07:03:19] Starting component SegmentAPI
server-1 | INFO: [02-08-2024 07:03:19] Started server process [1]
server-1 | INFO: [02-08-2024 07:03:19] Waiting for application startup.
server-1 | INFO: [02-08-2024 07:03:19] Application startup complete.
server-1 | INFO: [02-08-2024 07:03:19] Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
```

View File

@@ -1,139 +0,0 @@
# Start LanceDB Server
LanceDB is an embedded vector database for AI applications. It is open source and distributed with an Apache-2.0 license.
LanceDB datasets are persisted to disk and can be shared in Python.
## Setup
```bash
npm install -S vectordb
```
## Usage
### Create a new index from texts
```python
import os
import tempfile
from langchain.vectorstores import LanceDB
from langchain.embeddings.openai import OpenAIEmbeddings
from vectordb import connect
async def run():
dir = tempfile.mkdtemp(prefix="lancedb-")
db = await connect(dir)
table = await db.create_table("vectors", [{"vector": [0] * 1536, "text": "sample", "id": 1}])
vector_store = await LanceDB.from_texts(
["Hello world", "Bye bye", "hello nice world"],
[{"id": 2}, {"id": 1}, {"id": 3}],
OpenAIEmbeddings(),
table=table,
)
result_one = await vector_store.similarity_search("hello world", 1)
print(result_one)
# [ Document(page_content='hello nice world', metadata={'id': 3}) ]
# Run the function
import asyncio
asyncio.run(run())
```
API Reference:
- `LanceDB` from `@langchain/community/vectorstores/lancedb`
- `OpenAIEmbeddings` from `@langchain/openai`
### Create a new index from a loader
```python
import os
import tempfile
from langchain.vectorstores import LanceDB
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.document_loaders.fs import TextLoader
from vectordb import connect
# Create docs with a loader
loader = TextLoader("src/document_loaders/example_data/example.txt")
docs = loader.load()
async def run():
dir = tempfile.mkdtemp(prefix="lancedb-")
db = await connect(dir)
table = await db.create_table("vectors", [{"vector": [0] * 1536, "text": "sample", "source": "a"}])
vector_store = await LanceDB.from_documents(docs, OpenAIEmbeddings(), table=table)
result_one = await vector_store.similarity_search("hello world", 1)
print(result_one)
# [
# Document(page_content='Foo\nBar\nBaz\n\n', metadata={'source': 'src/document_loaders/example_data/example.txt'})
# ]
# Run the function
import asyncio
asyncio.run(run())
```
API Reference:
- `LanceDB` from `@langchain/community/vectorstores/lancedb`
- `OpenAIEmbeddings` from `@langchain/openai`
- `TextLoader` from `langchain/document_loaders/fs/text`
### Open an existing dataset
```python
import os
import tempfile
from langchain.vectorstores import LanceDB
from langchain.embeddings.openai import OpenAIEmbeddings
from vectordb import connect
async def run():
uri = await create_test_db()
db = await connect(uri)
table = await db.open_table("vectors")
vector_store = LanceDB(OpenAIEmbeddings(), table=table)
result_one = await vector_store.similarity_search("hello world", 1)
print(result_one)
# [ Document(page_content='Hello world', metadata={'id': 1}) ]
async def create_test_db():
dir = tempfile.mkdtemp(prefix="lancedb-")
db = await connect(dir)
await db.create_table(
"vectors",
[
{"vector": [0] * 1536, "text": "Hello world", "id": 1},
{"vector": [0] * 1536, "text": "Bye bye", "id": 2},
{"vector": [0] * 1536, "text": "hello nice world", "id": 3},
],
)
return dir
# Run the function
import asyncio
asyncio.run(run())
```
API Reference:
- `LanceDB` from `@langchain/community/vectorstores/lancedb`
- `OpenAIEmbeddings` from `@langchain/openai`

View File

@@ -1,2 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

View File

@@ -1,20 +0,0 @@
# Pinecone setup
## 1. Create Pinecone account from the below link
https://app.pinecone.io/
More details from Pinecone quick start guide https://docs.pinecone.io/guides/get-started/quickstart
## 2. Get API key
API Key is needed to make the API calls. API key can get it from the Project -> Manage -> API keys
## 3. Create the index in https://app.pinecone.io/
Following details are to be provided
1. Index name
2. Based on the embedding model selected, following has to be provided
a. Dimensions
b. Metric

View File

@@ -1,2 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

View File

@@ -1,13 +0,0 @@
# Start Qdrant server
## 1. Download Qdrant image
```bash
docker pull qdrant/qdrant
```
## 2. Run Qdrant service
```bash
docker run -p 6333:6333 -p 6334:6334 -v $(pwd)/qdrant_storage:/qdrant/storage:z qdrant/qdrant
```

View File

@@ -1,2 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

View File

@@ -1,2 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

View File

@@ -1,2 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

View File

@@ -30,7 +30,7 @@ function start_service() {
export POSTGRES_PASSWORD=testpwd
export POSTGRES_DB=vectordb
docker run --name test-comps-vectorstore-postgres -e POSTGRES_USER=${POSTGRES_USER} -e POSTGRES_HOST_AUTH_METHOD=trust -e POSTGRES_DB=${POSTGRES_DB} -e POSTGRES_PASSWORD=${POSTGRES_PASSWORD} -p 5432:5432 -d -v $WORKPATH/comps/vectorstores/pgvector/init.sql:/docker-entrypoint-initdb.d/init.sql pgvector/pgvector:0.7.0-pg16
docker run --name test-comps-vectorstore-postgres -e POSTGRES_USER=${POSTGRES_USER} -e POSTGRES_HOST_AUTH_METHOD=trust -e POSTGRES_DB=${POSTGRES_DB} -e POSTGRES_PASSWORD=${POSTGRES_PASSWORD} -p 5432:5432 -d -v $WORKPATH/comps/third_parties/pgvector/src/init.sql:/docker-entrypoint-initdb.d/init.sql pgvector/pgvector:0.7.0-pg16
sleep 10s

View File

@@ -10,7 +10,7 @@ ip_address=$(hostname -I | awk '{print $1}')
function build_docker_images() {
cd $WORKPATH
docker build --no-cache --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -t opea/vectorstore-pathway:comps -f comps/vectorstores/pathway/Dockerfile .
docker build --no-cache --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -t opea/vectorstore-pathway:comps -f comps/third_parties/pathway/src/Dockerfile .
docker build --no-cache -t opea/retriever-pathway:comps --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/retrievers/src/Dockerfile .
if [ $? -ne 0 ]; then
@@ -34,7 +34,7 @@ function start_service() {
export PATHWAY_HOST="0.0.0.0"
export PATHWAY_PORT=5433
docker run -d --name="test-comps-retriever-pathway-vectorstore" -e PATHWAY_HOST=${PATHWAY_HOST} -e PATHWAY_PORT=${PATHWAY_PORT} -e TEI_EMBEDDING_ENDPOINT=${TEI_EMBEDDING_ENDPOINT} -e http_proxy=$http_proxy -e https_proxy=$https_proxy -v $WORKPATH/comps/vectorstores/pathway/README.md:/app/data/README.md -p ${PATHWAY_PORT}:${PATHWAY_PORT} --network="host" opea/vectorstore-pathway:comps
docker run -d --name="test-comps-retriever-pathway-vectorstore" -e PATHWAY_HOST=${PATHWAY_HOST} -e PATHWAY_PORT=${PATHWAY_PORT} -e TEI_EMBEDDING_ENDPOINT=${TEI_EMBEDDING_ENDPOINT} -e http_proxy=$http_proxy -e https_proxy=$https_proxy -v $WORKPATH/comps/third_parties/pathway/src/README.md:/app/data/README.md -p ${PATHWAY_PORT}:${PATHWAY_PORT} --network="host" opea/vectorstore-pathway:comps
sleep 30s

View File

@@ -25,7 +25,7 @@ function start_service() {
export POSTGRES_DB=vectordb
pgvector_port=5079
docker run --name test-comps-retriever-pgvector-vectorstore -e POSTGRES_USER=${POSTGRES_USER} -e POSTGRES_HOST_AUTH_METHOD=trust -e POSTGRES_DB=${POSTGRES_DB} -e POSTGRES_PASSWORD=${POSTGRES_PASSWORD} -d -v $WORKPATH/comps/vectorstores/pgvector/init.sql:/docker-entrypoint-initdb.d/init.sql -p $pgvector_port:5432 pgvector/pgvector:0.7.0-pg16
docker run --name test-comps-retriever-pgvector-vectorstore -e POSTGRES_USER=${POSTGRES_USER} -e POSTGRES_HOST_AUTH_METHOD=trust -e POSTGRES_DB=${POSTGRES_DB} -e POSTGRES_PASSWORD=${POSTGRES_PASSWORD} -d -v $WORKPATH/comps/third_parties/pgvector/src/init.sql:/docker-entrypoint-initdb.d/init.sql -p $pgvector_port:5432 pgvector/pgvector:0.7.0-pg16
sleep 10s
# pgvector retriever

View File

@@ -10,7 +10,7 @@ ip_address=$(hostname -I | awk '{print $1}')
function start_service() {
cd $WORKPATH/comps/vectorstores/milvus
cd $WORKPATH/comps/third_parties/milvus/deployment/docker_compose
rm -rf volumes/
docker compose up -d

View File

@@ -10,7 +10,7 @@ ip_address=$(hostname -I | awk '{print $1}')
function build_docker_images() {
cd $WORKPATH
docker build --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -t opea/vectorstore-pathway:comps -f comps/vectorstores/pathway/Dockerfile .
docker build --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -t opea/vectorstore-pathway:comps -f comps/third_parties/pathway/src/Dockerfile .
cd $WORKPATH
@@ -45,7 +45,7 @@ function start_service() {
export PATHWAY_HOST="0.0.0.0"
export PATHWAY_PORT=5437
docker run -d --name="test-comps-vectorstore-pathway-ms" -e PATHWAY_HOST=${PATHWAY_HOST} -e PATHWAY_PORT=${PATHWAY_PORT} -e TEI_EMBEDDING_ENDPOINT=${TEI_EMBEDDING_ENDPOINT} -e http_proxy=$http_proxy -e https_proxy=$https_proxy -v $WORKPATH/comps/vectorstores/pathway/README.md:/app/data/README.md -p ${PATHWAY_PORT}:${PATHWAY_PORT} --network="host" opea/vectorstore-pathway:comps
docker run -d --name="test-comps-vectorstore-pathway-ms" -e PATHWAY_HOST=${PATHWAY_HOST} -e PATHWAY_PORT=${PATHWAY_PORT} -e TEI_EMBEDDING_ENDPOINT=${TEI_EMBEDDING_ENDPOINT} -e http_proxy=$http_proxy -e https_proxy=$https_proxy -v $WORKPATH/comps/third_parties/pathway/src/README.md:/app/data/README.md -p ${PATHWAY_PORT}:${PATHWAY_PORT} --network="host" opea/vectorstore-pathway:comps
sleep 70s