---
# Docker Compose stack for OpenDevin, using a local Ollama server on the
# host as the LLM backend and an NVIDIA GPU reserved for the container.
name: opendevin

services:
  opendevin:
    # Pinned release; switch to the commented line to track latest.
    image: ghcr.io/opendevin/opendevin:0.7
    # image: ghcr.io/opendevin/opendevin:latest
    container_name: opendevin
    deploy:
      resources:
        reservations:
          devices:
            # Reserve GPU 0 via the NVIDIA container runtime.
            - driver: nvidia
              device_ids: ['0']
              capabilities: [gpu]
    ports:
      # Quoted to avoid YAML 1.1 sexagesimal (base-60) misparsing of
      # colon-separated digit strings. Host 3300 -> container 3000.
      - "3300:3000"
    volumes:
      - /opt/opendevin/base:/opt/workspace_base
      - /opt/opendevin/logs:/app/logs
      # Docker socket passthrough: OpenDevin spawns sandbox containers
      # on the host daemon.
      - /var/run/docker.sock:/var/run/docker.sock
    restart: unless-stopped
    environment:
      # Placeholder — the real backend is Ollama, selected below.
      - OPENAI_API_KEY=anything
      - CUSTOM_LLM_PROVIDER=ollama
      # - LLM_MODEL=deepseek-coder-v2:latest
      - LLM_MODEL=codestral:latest
      # NOTE: no quotes here — in list-form environment entries Compose
      # passes quote characters through literally, so ="local" would set
      # the value to the six characters "local" including the quotes.
      - LLM_EMBEDDING_MODEL=local
      # Ollama ignores the key but the client requires one to be set.
      - LLM_API_KEY=ollama
      - LLM_BASE_URL=http://host.docker.internal:11434
      - LLM_OLLAMA_BASE_URL=http://host.docker.internal:11434
      - TZ=Europe/London
      # Run the sandbox as root (uid 0).
      - SANDBOX_USER_ID=0
      - WORKSPACE_MOUNT_PATH=/opt/opendevin/base/workspace
    extra_hosts:
      # Make host.docker.internal resolve on Linux (built-in on
      # Docker Desktop only) so the Ollama URLs above work.
      host.docker.internal: host-gateway