Not specifying volumes in docker-compose.yml for services

Posted on 2025-02-07 11:29:17

Docker version 20.10.17, build 100c701

I have a Vite + Vue 3 frontend and a Django backend with a Postgres database, Celery and Redis.

When I do not include volumes for the api and celery services, docker compose up is successful.

If I add volumes to api and celery (./api:/api)

  api:
    build: 
      context: ./backend/backend
      dockerfile: Dockerfile
    ports:
      - "8000:8000"
    command:  >
      sh -c   "python3 manage.py makemigrations &&
                python3 manage.py migrate &&
                python3 manage.py wait_for_db &&
                python3 manage.py runserver 0.0.0.0:8000"
    volumes:
      - ./api:/api/
    environment:
      - DB_HOST=db
      - DB_NAME=${DATABASE_NAME}
      - DB_USER=${DATABASE_USER}
      - DB_PASS=${DATABASE_PASSWORD}
    depends_on:
      - db

  celery:
    restart: always
    build:
      context: ./backend/backend
    command: celery -A backend worker -l info
    volumes:
      - ./api:/api/
    environment:
      - DB_HOST=db
      - DB_NAME=api
      - DB_USER=${DATABASE_USER}
      - DB_PASS=${DATABASE_PASSWORD}
    depends_on:
      - db
      - redis
      - api

then I get the following error:

Error: Invalid value for '-A' / '--app': Unable to load celery
application. The module backend was not found.

which tells me that the path for the volume is not correct, though I am not sure what I should set it to.
Is there any harm in not specifying volumes for these services?

folder structure

.
├── backend
│   ├── backend
│   │   ├── backend
│   │   ├── core
│   │   ├── db.sqlite3
│   │   ├── Dockerfile
│   │   ├── manage.py
│   │   └── requirements.txt
│   └── venv
├── docker-compose.yml
└── frontend
    ├── Dockerfile
    ├── entrypoint.sh
    ├── index.html
    ├── node_modules
    ├── package.json
    ├── package-lock.json
    ├── postcss.config.js
    ├── public
    ├── README.md
    ├── src
    ├── tailwind.config.js
    ├── tsconfig.json
    ├── tsconfig.node.json
    └── vite.config.ts
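
There is no ./api directory next to docker-compose.yml in this tree, so the bind mount ./api:/api makes Docker create an empty directory on the host and mount it over /api in the container, hiding the code that the backend Dockerfile (below) copied into the image; that is why Celery can no longer find the backend module. Leaving the volumes out does no harm at runtime, since the image already contains the code; a mount is only useful if you want host edits picked up without rebuilding. In that case the mount would presumably need to point at the actual Django project directory, roughly like this (a sketch, assuming the project really lives in ./backend/backend as shown above):

  api:
    volumes:
      - ./backend/backend:/api

  celery:
    volumes:
      - ./backend/backend:/api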

frontend Dockerfile

FROM node:lts-alpine3.16
RUN addgroup app && adduser -S -G app app
USER app
WORKDIR /web
COPY entrypoint.sh /entrypoint.sh
USER root
RUN chmod +x /entrypoint.sh
USER app
COPY package*.json ./
USER root
RUN npm install
USER app
COPY . .
ENTRYPOINT ["/entrypoint.sh"]

backend Dockerfile

FROM python:3-alpine
RUN addgroup app && adduser -S -G app app
USER root
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
WORKDIR /api
COPY requirements.txt .
RUN apk add --update --no-cache postgresql-client jpeg-dev
RUN apk add --update --no-cache --virtual .tmp-build-deps \ 
    gcc libc-dev linux-headers postgresql-dev musl-dev zlib zlib-dev
RUN pip install -r requirements.txt
RUN apk del .tmp-build-deps
USER app
COPY . .
USER root
RUN chown -R app backend manage.py
USER app
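
Because of WORKDIR /api followed by COPY . ., the image itself already carries manage.py and the backend package under /api, which is why the stack runs fine when nothing is mounted on top of that path. A quick way to check what a container actually sees there (a sketch; the docker-compose spelling applies if you are on Compose v1):

  docker compose run --rm api ls /api
  # Compose v1:
  docker-compose run --rm api ls /api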

docker-compose.yml

version: '3.9'

services:
  
  web:
    build: 
      context: ./frontend
      dockerfile: Dockerfile
    ports:
      - "3000:3000"
    command: npm run dev
 
  api:
    build: 
      context: ./backend/backend
      dockerfile: Dockerfile
    ports:
      - "8000:8000"
    command:  >
      sh -c   "python3 manage.py makemigrations &&
                python3 manage.py migrate &&
                python3 manage.py wait_for_db &&
                python3 manage.py runserver 0.0.0.0:8000"
    environment:
      - DB_HOST=db
      - DB_NAME=${DATABASE_NAME}
      - DB_USER=${DATABASE_USER}
      - DB_PASS=${DATABASE_PASSWORD}
    depends_on:
      - db
  
  db:
    image: postgres:14.1-alpine
    environment:
    - POSTGRES_DB=${DATABASE_NAME}
    - POSTGRES_USER=${DATABASE_USER}
    - POSTGRES_PASSWORD=${DATABASE_PASSWORD}
    volumes: 
      - pgdata:/var/lib/postgresql/data
  
  redis:
    image: redis:alpine

  celery:
    restart: always
    build:
      context: ./backend/backend
    command: celery -A backend worker -l info
    environment:
      - DB_HOST=db
      - DB_NAME=api
      - DB_USER=${DATABASE_USER}
      - DB_PASS=${DATABASE_PASSWORD}
    depends_on:
      - db
      - redis
      - api

volumes:
  pgdata:

1 Answer

北恋 2025-02-14 11:29:17

Found that defining the volume as follows throws no errors, so I don't have to leave it out.

celery:
    restart: always
    build:
      context: ./backend/backend
    command: celery -A backend worker -l info
    volumes:
      - api-volume:/api
    environment:
      - DB_HOST=db
      - DB_NAME=api
      - DB_USER=${DATABASE_USER}
      - DB_PASS=${DATABASE_PASSWORD}
    depends_on:
      - db
      - redis
      - api

volumes:
  api-volume:
    external: false
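
A likely reason this works: when an empty named volume is mounted at a path that already has content in the image, Docker copies the image's files at that path into the volume on first use, so /api still contains the code and celery -A backend can import it. Unlike a bind mount, though, it does not reflect edits made in ./backend/backend, and once the volume is populated it is not refreshed from a rebuilt image. If the volume ever holds stale code, removing it forces a fresh copy on the next start (a sketch; the actual volume name is prefixed with the Compose project name, which defaults to the directory name):

  docker compose down
  docker volume rm <project>_api-volume   # hypothetical name; check `docker volume ls`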