From 2131383bb217f19c994ebfa15e19e633753be397 Mon Sep 17 00:00:00 2001
From: jaydee
Date: Sat, 1 Mar 2025 23:37:20 +0100
Subject: [PATCH] alias

---
 immich/docker-compose.yml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/immich/docker-compose.yml b/immich/docker-compose.yml
index 78bcf83..b49099a 100644
--- a/immich/docker-compose.yml
+++ b/immich/docker-compose.yml
@@ -14,8 +14,8 @@ services:
     image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
     extends:
       file: hwaccel.transcoding.yml
-      #service: vaapi # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
-      service: cpu
+      service: vaapi # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
+      #service: cpu
     volumes:
       # Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
       - /share/docker_data/immich/library:/usr/src/app/upload
@@ -39,8 +39,8 @@ services:
     image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
     extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
      file: hwaccel.ml.yml
-      #service: openvino # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
-      service: cpu
+      service: openvino # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
+      #service: cpu
     volumes:
       - model-cache:/cache
     env_file:
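
Note: the hunks above switch both extends targets from the cpu services to the
accelerated ones, so immich-server inherits its transcoding settings from the
vaapi service in hwaccel.transcoding.yml and immich-machine-learning inherits
from the openvino service in hwaccel.ml.yml. As a rough sketch of what those
services contribute (the exact contents are assumed here; check the
hwaccel.*.yml files shipped with your Immich version), they mainly add GPU
device passthrough:

services:
  vaapi:                       # sketch of hwaccel.transcoding.yml (assumed)
    devices:
      - /dev/dri:/dev/dri      # Intel/AMD render nodes for VAAPI transcoding

  openvino:                    # sketch of hwaccel.ml.yml (assumed)
    devices:
      - /dev/dri:/dev/dri      # iGPU used for OpenVINO inference
    volumes:
      - /dev/bus/usb:/dev/bus/usb

The ML hardware-acceleration docs linked in the file also describe running the
machine-learning container with an accelerator-specific image tag (e.g. an
-openvino suffix on the version tag); the hunk above leaves the image line
unchanged, so verify that detail against the docs for the version in use.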