commit 65bf56db73
parent 114cc28b11
2025-02-26 19:28:38 +01:00

@@ -13,14 +13,14 @@ services:
     container_name: immich_server
     image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
     extends:
-      file: hwaccel.transcoding.yml
-      service: vaapi # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
+      file: immich/hwaccel.transcoding.yml
+      service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
     devices:
       - /dev/dri/renderD128
       - /dev/dri/card0
     volumes:
       # Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
-      #- /share/docker_data/immich/library:/usr/src/app/upload
+      - /share/docker_data/immich/library:/usr/src/app/upload
       - /media/nas/nas-photo:/mnt/photos2:ro
       - /etc/localtime:/etc/localtime:ro
     env_file:
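
For context: Docker Compose fills in `${IMMICH_VERSION:-release}` from the shell environment or a `.env` file next to the compose file, while files listed under `env_file:` are only injected into the containers. A minimal `.env` sketch covering the two variables mentioned above (values are illustrative, not taken from this commit):

    # .env (example values only)
    IMMICH_VERSION=release
    # UPLOAD_LOCATION is what the volume comment refers to; after this commit the
    # compose file bind-mounts /share/docker_data/immich/library directly instead
    # of interpolating the variable.
    UPLOAD_LOCATION=/share/docker_data/immich/library
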
@@ -39,12 +39,12 @@ services:
     # For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
     # Example tag: ${IMMICH_VERSION:-release}-cuda
     image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
-    extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
-      file: hwaccel.ml.yml
-      service: openvino # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
-    devices:
-      - /dev/dri/renderD128
-      - /dev/dri/card0
+    # extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
+    #   file: hwaccel.ml.yml
+    #   service: openvino # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
+    # devices:
+    #   - /dev/dri/renderD128
+    #   - /dev/dri/card0
     volumes:
       - model-cache:/cache
     env_file:
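
Should hardware-accelerated ML be wanted again later, the commented block would be restored roughly as below. This is only a sketch: the `immich/` path prefix mirrors the transcoding change in the first hunk and the `-openvino` image suffix follows the comment at the top of this hunk; both are assumptions to verify against the Immich docs, not part of this commit.

    # machine-learning service with OpenVINO acceleration re-enabled (sketch)
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}-openvino
    extends:
      file: immich/hwaccel.ml.yml # assumed to need the same relative prefix as hwaccel.transcoding.yml
      service: openvino # one of [armnn, cuda, openvino, openvino-wsl]; use the `-wsl` variant under WSL2
    devices:
      - /dev/dri/renderD128
      - /dev/dri/card0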