diff --git a/CodeGen/docker_compose/intel/cpu/xeon/compose.yaml b/CodeGen/docker_compose/intel/cpu/xeon/compose.yaml
index f9e7e2628..797395100 100644
--- a/CodeGen/docker_compose/intel/cpu/xeon/compose.yaml
+++ b/CodeGen/docker_compose/intel/cpu/xeon/compose.yaml
@@ -8,7 +8,7 @@ services:
     ports:
       - "8028:80"
     volumes:
-      - "${MODEL_CACHE}:/data"
+      - "${MODEL_CACHE:-./data}:/data"
     shm_size: 1g
     environment:
       no_proxy: ${no_proxy}
diff --git a/CodeGen/docker_compose/intel/hpu/gaudi/compose.yaml b/CodeGen/docker_compose/intel/hpu/gaudi/compose.yaml
index 62ec96e62..19a77bef5 100644
--- a/CodeGen/docker_compose/intel/hpu/gaudi/compose.yaml
+++ b/CodeGen/docker_compose/intel/hpu/gaudi/compose.yaml
@@ -8,7 +8,7 @@ services:
     ports:
       - "8028:80"
     volumes:
-      - "${MODEL_CACHE}:/data"
+      - "${MODEL_CACHE:-./data}:/data"
     environment:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
diff --git a/CodeTrans/docker_compose/intel/cpu/xeon/compose.yaml b/CodeTrans/docker_compose/intel/cpu/xeon/compose.yaml
index 0ece6dff1..2028760c4 100644
--- a/CodeTrans/docker_compose/intel/cpu/xeon/compose.yaml
+++ b/CodeTrans/docker_compose/intel/cpu/xeon/compose.yaml
@@ -8,7 +8,7 @@ services:
     ports:
       - "8008:80"
     volumes:
-      - "${MODEL_CACHE}:/data"
+      - "${MODEL_CACHE:-./data}:/data"
     shm_size: 1g
     environment:
       no_proxy: ${no_proxy}
diff --git a/CodeTrans/docker_compose/intel/hpu/gaudi/compose.yaml b/CodeTrans/docker_compose/intel/hpu/gaudi/compose.yaml
index 3e25dee89..e697a0927 100644
--- a/CodeTrans/docker_compose/intel/hpu/gaudi/compose.yaml
+++ b/CodeTrans/docker_compose/intel/hpu/gaudi/compose.yaml
@@ -8,7 +8,7 @@ services:
     ports:
       - "8008:80"
     volumes:
-      - "${MODEL_CACHE}:/data"
+      - "${MODEL_CACHE:-./data}:/data"
     environment:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
diff --git a/DocSum/docker_compose/intel/cpu/xeon/compose.yaml b/DocSum/docker_compose/intel/cpu/xeon/compose.yaml
index 0d87eaeb2..8d91db5e7 100644
--- a/DocSum/docker_compose/intel/cpu/xeon/compose.yaml
+++ b/DocSum/docker_compose/intel/cpu/xeon/compose.yaml
@@ -21,7 +21,7 @@ services:
       timeout: 10s
       retries: 100
     volumes:
-      - "${MODEL_CACHE}:/data"
+      - "${MODEL_CACHE:-./data}:/data"
    shm_size: 1g
    command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0 --max-input-length ${MAX_INPUT_TOKENS} --max-total-tokens ${MAX_TOTAL_TOKENS}