diff --git a/README.md b/README.md
index eac1477999e7b4ded5d9bf8ba28eb3b335fade6a..462bb8b3b86433e65893fea02c9703c53a60f495 100644
--- a/README.md
+++ b/README.md
@@ -1,21 +1,35 @@
-1. Run the server-side FastAPI app in one terminal window:
+README
+==========
+# Run the server-side FastAPI app
+## In development mode
+You can create and activate a virtual environment with the following commands.
 
-    ```sh
-    $ cd backend
-    $ python3.9 -m venv env
-    $ source env/bin/activate
-    (env)$ pip install -r requirements.txt
-    (env)$ python main.py
-    ```
+```sh
+$ cd backend
+$ python3.9 -m venv env
+$ source env/bin/activate
+```
 
-    Navigate to [http://localhost:8000](http://localhost:8000)
+Then you need to install the dependencies by executing `pip install -r requirements.txt`  
+Comment out the `app` service in `docker-compose.yml`, then build and run the remaining services.  
+Start the development server running `python -m uvicorn main:app --reload --port=3001 --host 0.0.0.0`
 
-1. Run the client-side React app in a different terminal window:
+Navigate to [http://localhost:3001](http://localhost:3001)
 
-    ```sh
-    $ cd frontend
-    $ npm install
-    $ npm run start
-    ```
+<br/>  
 
-    Navigate to [http://localhost:3000](http://localhost:3000)
+## In production mode
+Build the Docker image: `docker-compose build`  
+Run the server: `docker-compose up -d`
+
+<br/>  
+
+# Run the client-side React app in a different terminal window
+
+```sh
+$ cd frontend
+$ npm install
+$ npm run start
+```
+
+Navigate to [http://localhost:3000](http://localhost:3000)
diff --git a/backend/.dockerignore b/backend/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..bc3d69fc86ec9dda5c8a1b7e7d67254d407ee9fb
--- /dev/null
+++ b/backend/.dockerignore
@@ -0,0 +1,8 @@
+.dockerignore
+.env.template
+.gitignore
+docker-compose.yml
+Dockerfile
+README.md
+__pycache__
+env
\ No newline at end of file
diff --git a/backend/Dockerfile b/backend/Dockerfile
index 2bf67b80acabca706b87cbf3a550423a1769ae4e..75839870b57ec62e2736f7252bba752eb61484b0 100644
--- a/backend/Dockerfile
+++ b/backend/Dockerfile
@@ -20,4 +20,4 @@ COPY --from=build /venv /venv
 WORKDIR /backend
 COPY . .
 
-ENTRYPOINT ["python3", "main.py"]
\ No newline at end of file
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "80"]
\ No newline at end of file
diff --git a/backend/app.py b/backend/app.py
deleted file mode 100644
index 24562b55dcb8cec9d44e73125fdabb5175c88b1f..0000000000000000000000000000000000000000
--- a/backend/app.py
+++ /dev/null
@@ -1,80 +0,0 @@
-from typing import List
-from fastapi import Body, Depends, FastAPI
-from fastapi.middleware.cors import CORSMiddleware
-from sqlalchemy.orm import Session
-from dotenv import load_dotenv
-import os
-
-from db import crud, schemas, database
-
-
-# load environment variables
-load_dotenv()
-
-app = FastAPI(docs_url="/api/docs", openapi_url="/api/openapi.json")
-
-origins = [
-    os.getenv('WEB_ROOT'),
-]
-
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=origins,
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"]
-)
-
-def get_db():
-    """Create a database session."""
-    db = database.SessionLocal()
-    try:
-        yield db
-    finally:
-        db.close()
-
-
-@app.get('/api/{place}', response_model=List[schemas.Record])
-async def eatfast(place: str, db: Session = Depends(get_db)):
-    return crud.get_records(place, db)
-
-
-@app.post('/api/create', response_model=schemas.Record)
-async def post(record: schemas.RecordBase = Body(...), db: Session = Depends(get_db)):
-    return crud.create_record(record, db)
-
-
-"""
-import cv2
-import numpy as np
-import keras
-
-from utils.preprocessing import fix_singular_shape, norm_by_imagenet
-
-
-model = keras.models.load_model('model')
-
-# contours of the zone of a picture that should be analyzed by the model
-contours = {
-    'eiffel': [[70, 370], [420, 720], [1280, 720], [1280, 250], [930, 215], [450, 550], [130, 350]]
-}
-
-masks = {}
-for key, polygon in contours.items():
-    mask = np.zeros((1280, 720, 3), dtype=np.unit8)
-    cv2.fillPoly(mask, [polygon], (255, 255, 255))
-    masks[key] = mask
-
-
-@app.get("/estimate/{id}")
-async def estimate_(id: str) -> float:
-    # img = fetch(...)
-    img = np.zeros((1280, 720, 3))
-    resized_img = cv2.cvtColor(cv2.resize(img, (1280, 720)), cv2.COLOR_BGR2RGB).astype(np.float32)
-    masked_img = cv2.bitwise_and(resized_img, mask[id])
-    treated_img = fix_singular_shape(masked_img, 16)
-    input_image = np.expand_dims(np.squeeze(norm_by_imagenet([treated_img])), axis=0)
-    pred_map = np.squeeze(model.predict(input_image))
-    count_prediction = np.sum(pred_map)
-    return count_prediction
-"""
\ No newline at end of file
diff --git a/backend/docker-compose.yml b/backend/docker-compose.yml
index 374cc6629b5bdc19ae863ea4adbab8d185928386..76308ff5943bc2cc7431b8ba4737eabe5e72a230 100644
--- a/backend/docker-compose.yml
+++ b/backend/docker-compose.yml
@@ -9,17 +9,22 @@ services:
     command: ["mysqld", "--authentication-policy=mysql_native_password"]
     ports:
       - "3306:3306"
+    volumes:
+      - mysql-db:/var/lib/mysql
 
-  # app:
-  #   container_name: "app"
-  #   build: . 
-  #   depends_on:
-  #     - db
-  #   restart: always
-  #   ports:
-  #     - 3000:3000
-  #   env_file: .env
-  #   environment:
-  #     DB_HOST: db
-  #   links:
-  #     - db
\ No newline at end of file
+  app:
+    container_name: "app"
+    build: . 
+    depends_on:
+      - db
+    restart: always
+    ports:
+      - 8000:80
+    env_file: .env
+    environment:
+      DB_HOST: db
+    links:
+      - db
+
+volumes:
+  mysql-db:
\ No newline at end of file
diff --git a/backend/main.py b/backend/main.py
index 9ad9db27f031ef7bf37d268444b4a88c91c38a26..3d1329d138ee8e3142058bcde1a28087a3a3ea39 100644
--- a/backend/main.py
+++ b/backend/main.py
@@ -1,9 +1,86 @@
-import uvicorn
-from db import models, database
+from typing import List
+from fastapi import Body, Depends, FastAPI
+from fastapi.middleware.cors import CORSMiddleware
+from sqlalchemy.orm import Session
+from dotenv import load_dotenv
+import os
 
+from db import crud, schemas, database, models
 
-# Database creation
-models.Base.metadata.create_all(bind=database.engine)
 
-if __name__ == "__main__":
-    uvicorn.run("app:app", host="0.0.0.0", port=3000, reload=True)
+# load environment variables
+load_dotenv()
+
+app = FastAPI(docs_url="/api/docs", openapi_url="/api/openapi.json")
+
+origins = [
+    os.getenv('WEB_ROOT'),
+]
+
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=origins,
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"]
+)
+
+def get_db():
+    """Create a database session."""
+    db = database.SessionLocal()
+    try:
+        yield db
+    finally:
+        db.close()
+
+
+@app.on_event("startup")
+def on_startup():
+    # Database creation
+    models.Base.metadata.create_all(bind=database.engine)
+
+
+@app.get('/api/{place}', response_model=List[schemas.Record])
+async def eatfast(place: str, db: Session = Depends(get_db)):
+    return crud.get_records(place, db)
+
+
+@app.post('/api/create', response_model=schemas.Record)
+async def post(record: schemas.RecordBase = Body(...), db: Session = Depends(get_db)):
+    return crud.create_record(record, db)
+
+
+"""
+import cv2
+import numpy as np
+import keras
+
+from utils.preprocessing import fix_singular_shape, norm_by_imagenet
+
+
+model = keras.models.load_model('model')
+
+# contours of the zone of a picture that should be analyzed by the model
+contours = {
+    'eiffel': [[70, 370], [420, 720], [1280, 720], [1280, 250], [930, 215], [450, 550], [130, 350]]
+}
+
+masks = {}
+for key, polygon in contours.items():
+    mask = np.zeros((1280, 720, 3), dtype=np.uint8)
+    cv2.fillPoly(mask, [polygon], (255, 255, 255))
+    masks[key] = mask
+
+
+@app.get("/estimate/{id}")
+async def estimate_(id: str) -> float:
+    # img = fetch(...)
+    img = np.zeros((1280, 720, 3))
+    resized_img = cv2.cvtColor(cv2.resize(img, (1280, 720)), cv2.COLOR_BGR2RGB).astype(np.float32)
+    masked_img = cv2.bitwise_and(resized_img, masks[id])
+    treated_img = fix_singular_shape(masked_img, 16)
+    input_image = np.expand_dims(np.squeeze(norm_by_imagenet([treated_img])), axis=0)
+    pred_map = np.squeeze(model.predict(input_image))
+    count_prediction = np.sum(pred_map)
+    return count_prediction
+"""
\ No newline at end of file