diff --git a/immich/docker-compose.yml b/immich/docker-compose.yml
index a6ff57b..0f8c5fa 100644
--- a/immich/docker-compose.yml
+++ b/immich/docker-compose.yml
@@ -12,9 +12,12 @@ services:
   immich-server:
     container_name: immich_server
     image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
-    # extends:
-    #   file: hwaccel.transcoding.yml
-    #   service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
+    extends:
+      file: hwaccel.transcoding.yml
+      service: vaapi # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
+    devices:
+      - /dev/dri/renderD128
+      - /dev/dri/card0
     volumes:
       # Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
       #- /share/docker_data/immich/library:/usr/src/app/upload
@@ -36,13 +39,17 @@ services:
     # For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
     # Example tag: ${IMMICH_VERSION:-release}-cuda
-    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
-    # extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
-    #   file: hwaccel.ml.yml
-    #   service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
+    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}-openvino
+    extends: # hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
+      file: hwaccel.ml.yml
+      service: openvino # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
+    devices:
+      - /dev/dri/renderD128
+      - /dev/dri/card0
     volumes:
       - model-cache:/cache
     env_file:
       - stack.env
+    restart: always
     healthcheck:
       disable: false
 