version: '3.8'

services:
  # PostgreSQL database
  postgres:
    image: postgres:15-alpine
    container_name: support-postgres
    environment:
      POSTGRES_USER: postgres
      # NOTE(review): dev-only credentials — supply real secrets via env/secret store in production
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: support
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    # Lets dependents wait for the DB to accept connections, not just for the container to start
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres -d support"]
      interval: 5s
      timeout: 3s
      retries: 5
    networks:
      - support-network

  # Redis for caching and session management
  redis:
    image: redis:7-alpine
    container_name: support-redis
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 5
    networks:
      - support-network

  # Backend API
  backend:
    build:
      context: ./backend
      dockerfile: Dockerfile
    container_name: support-backend
    environment:
      # Environment values are quoted so YAML hands Compose plain strings
      # (unquoted 5432 / 0.7 parse as int/float and can be re-serialized unexpectedly)
      DB_HOST: postgres
      DB_USER: postgres
      DB_PASSWORD: postgres
      DB_NAME: support
      DB_PORT: "5432"
      DB_SSLMODE: disable
      DB_TIMEZONE: UTC
      REDIS_HOST: redis
      REDIS_PORT: "6379"
      SERVER_HOST: "0.0.0.0"
      SERVER_PORT: "8080"
      # NOTE(review): placeholder secret — must be overridden outside of local dev
      AUTH_JWT_SECRET: your-secret-key-change-in-production
      AUTH_JWT_EXPIRATION_HOURS: "24"
      AI_OPENAI_API_KEY: ${OPENAI_API_KEY}
      AI_OPENAI_MODEL: gpt-4
      AI_OPENAI_MAX_TOKENS: "4000"
      AI_OPENAI_TEMPERATURE: "0.7"
      AI_OPENAI_TOP_P: "1.0"
      AI_LOCAL_ENDPOINT: http://ollama:11434
      AI_LOCAL_MODEL: llama2
      AI_LOCAL_MAX_TOKENS: "2000"
      AI_LOCAL_TEMPERATURE: "0.7"
      AI_LOCAL_TOP_P: "1.0"
      LOG_LEVEL: info
    ports:
      - "8080:8080"
    # Long-form depends_on: wait for the health checks above, not just container start,
    # so the API does not crash-loop while postgres/redis are still initializing
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    volumes:
      # Dev-mode bind mount over the built image; remove for production deploys
      - ./backend:/app
    networks:
      - support-network

  # Ollama for local LLM
  ollama:
    image: ollama/ollama:latest
    container_name: support-ollama
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    networks:
      - support-network

  # Frontend (to be implemented)
  # frontend:
  #   build:
  #     context: ./frontend
  #     dockerfile: Dockerfile
  #   container_name: support-frontend
  #   ports:
  #     - "3000:3000"
  #   depends_on:
  #     - backend
  #   networks:
  #     - support-network

volumes:
  postgres_data:
  redis_data:
  ollama_data:

networks:
  support-network:
    driver: bridge