diff --git a/immich/docker-compose.yml b/immich/docker-compose.yml
index 5b19a22..b3839d6 100644
--- a/immich/docker-compose.yml
+++ b/immich/docker-compose.yml
@@ -12,12 +12,12 @@ services:
   immich-server:
     container_name: immich_server
     image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
-    # extends:
-    #   file: hwaccel.transcoding.yml
-    #   service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
-    # devices:
-    #   - /dev/dri/renderD128
-    #   - /dev/dri/card0
+    extends:
+      file: hwaccel.transcoding.yml
+      service: vaapi # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
+    devices:
+      - /dev/dri/renderD128
+      - /dev/dri/card1
     volumes:
       # Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
       - /share/docker_data/immich/library:/usr/src/app/upload
@@ -39,12 +39,12 @@ services:
     # For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
     # Example tag: ${IMMICH_VERSION:-release}-cuda
     image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
-    # extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
-    #   file: hwaccel.ml.yml
-    #   service: openvino # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
-    # devices:
-    #   - /dev/dri/renderD128
-    #   - /dev/dri/card1
+    extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
+      file: hwaccel.ml.yml
+      service: openvino # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
+    devices:
+      - /dev/dri/renderD128
+      - /dev/dri/card1
     volumes:
       - model-cache:/cache
     env_file:
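
Note: the uncommented `extends` blocks above pull the `vaapi` and `openvino` service fragments from Immich's bundled `hwaccel.transcoding.yml` and `hwaccel.ml.yml`, and the explicit `/dev/dri/renderD128` / `/dev/dri/card1` device entries narrow the device mapping to this host's GPU nodes. As a rough orientation only (a sketch of what those upstream fragments typically contain; verify against the files shipped with your Immich release, since they may differ):

# hwaccel.transcoding.yml (sketch, not verbatim)
services:
  vaapi:
    devices:
      - /dev/dri:/dev/dri   # expose the GPU render/card nodes for VAAPI transcoding

# hwaccel.ml.yml (sketch, not verbatim)
services:
  openvino:
    devices:
      - /dev/dri:/dev/dri   # iGPU access for OpenVINO inference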