diff --git a/VAE/01-VAE-with-MNIST.ipynb b/VAE/01-VAE-with-MNIST.ipynb
index 3c2cf7bcab759d88768e81f9b3afceed48ad5142..e155c66f7170cb1230c169bff32dc20059b38789 100644
--- a/VAE/01-VAE-with-MNIST.ipynb
+++ b/VAE/01-VAE-with-MNIST.ipynb
@@ -172,8 +172,6 @@
     "import fidle.pwk as pwk\n",
     "\n",
     "run_dir = './run/MNIST.001'                     # Output directory\n",
-    "run_dir = os.getenv('FIDLE_RUN_DIR', run_dir)   # Can be override by an env var (batch)\n",
-    "\n",
     "datasets_dir = pwk.init('VAE1', run_dir)\n",
     "\n",
     "VAE.about()"
@@ -440,7 +438,7 @@
     }
    ],
    "source": [
-    "n          = 60000\n",
+    "n          = 70000\n",
     "batch_size = 64\n",
     "epochs     = 10\n",
     "\n",
diff --git a/VAE/06-Prepare-CelebA-datasets.ipynb b/VAE/06-Prepare-CelebA-datasets.ipynb
index d57b7d5c08ca1accc26fd6ee5e9dc7eae404ea3d..4fbd5797f5b410b4a658afa061227715e4792041 100644
--- a/VAE/06-Prepare-CelebA-datasets.ipynb
+++ b/VAE/06-Prepare-CelebA-datasets.ipynb
@@ -114,10 +114,10 @@
      "text": [
       "Version              : 0.6.1 DEV\n",
       "Notebook id          : VAE6\n",
-      "Run time             : Monday 4 January 2021, 23:45:46\n",
-      "TensorFlow version   : 2.2.0\n",
-      "Keras version        : 2.3.0-tf\n",
-      "Datasets dir         : /home/pjluc/datasets/fidle\n",
+      "Run time             : Wednesday 6 January 2021, 10:55:30\n",
+      "TensorFlow version   : 2.4.0\n",
+      "Keras version        : 2.4.0\n",
+      "Datasets dir         : /gpfswork/rech/mlh/uja62cb/datasets\n",
       "Run dir              : ./run\n",
       "CI running mode      : none\n",
       "Update keras cache   : False\n",
@@ -301,9 +301,9 @@
     "# ---- Parameters you can change -----------------------------------\n",
     "\n",
     "# ---- Tests\n",
-    "scale       = 0.2\n",
-    "image_size  = (128,128)\n",
-    "output_dir  = './data'\n",
+    "# scale       = 0.2\n",
+    "# image_size  = (128,128)\n",
+    "# output_dir  = './data'\n",
     "\n",
     "# ---- Full clusters generation, medium size\n",
     "# scale       = 1.\n",
@@ -311,9 +311,9 @@
     "# output_dir = f'{datasets_dir}/celeba/enhanced'\n",
     "\n",
     "# ---- Full clusters generation, large size\n",
-    "# scale       = 1.\n",
-    "# image_size  = (192,160)\n",
-    "# output_dir = f'{datasets_dir}/celeba/enhanced'"
+    "scale       = 1.\n",
+    "image_size  = (192,160)\n",
+    "output_dir = f'{datasets_dir}/celeba/enhanced'"
    ]
   },
   {
@@ -337,12 +337,12 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Scale is : 0.2\n",
-      "Image size is     : (128, 128)\n",
-      "dataset length is : 40519\n",
-      "cluster size is   : 2000\n",
+      "Scale is : 1.0\n",
+      "Image size is     : (192, 160)\n",
+      "dataset length is : 202599\n",
+      "cluster size is   : 10000\n",
       "clusters nb  is   : 21\n",
-      "cluster dir  is   : ./data/clusters-128x128\n"
+      "cluster dir  is   : /gpfswork/rech/mlh/uja62cb/datasets/celeba/enhanced/clusters-192x160\n"
      ]
     },
     {
@@ -361,27 +361,27 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Cluster 000 :    [########################################] 100.0% of 2000\n",
-      "Cluster 001 :    [########################################] 100.0% of 2000\n",
-      "Cluster 002 :    [########################################] 100.0% of 2000\n",
-      "Cluster 003 :    [########################################] 100.0% of 2000\n",
-      "Cluster 004 :    [########################################] 100.0% of 2000\n",
-      "Cluster 005 :    [########################################] 100.0% of 2000\n",
-      "Cluster 006 :    [########################################] 100.0% of 2000\n",
-      "Cluster 007 :    [########################################] 100.0% of 2000\n",
-      "Cluster 008 :    [########################################] 100.0% of 2000\n",
-      "Cluster 009 :    [########################################] 100.0% of 2000\n",
-      "Cluster 010 :    [########################################] 100.0% of 2000\n",
-      "Cluster 011 :    [########################################] 100.0% of 2000\n",
-      "Cluster 012 :    [########################################] 100.0% of 2000\n",
-      "Cluster 013 :    [########################################] 100.0% of 2000\n",
-      "Cluster 014 :    [########################################] 100.0% of 2000\n",
-      "Cluster 015 :    [########################################] 100.0% of 2000\n",
-      "Cluster 016 :    [########################################] 100.0% of 2000\n",
-      "Cluster 017 :    [########################################] 100.0% of 2000\n",
-      "Cluster 018 :    [########################################] 100.0% of 2000\n",
-      "Cluster 019 :    [########################################] 100.0% of 2000\n",
-      "Cluster 020 :    [##########------------------------------]  25.0% of 2000\r"
+      "Cluster 000 :    [########################################] 100.0% of 10000\n",
+      "Cluster 001 :    [########################################] 100.0% of 10000\n",
+      "Cluster 002 :    [########################################] 100.0% of 10000\n",
+      "Cluster 003 :    [########################################] 100.0% of 10000\n",
+      "Cluster 004 :    [########################################] 100.0% of 10000\n",
+      "Cluster 005 :    [########################################] 100.0% of 10000\n",
+      "Cluster 006 :    [########################################] 100.0% of 10000\n",
+      "Cluster 007 :    [########################################] 100.0% of 10000\n",
+      "Cluster 008 :    [########################################] 100.0% of 10000\n",
+      "Cluster 009 :    [########################################] 100.0% of 10000\n",
+      "Cluster 010 :    [########################################] 100.0% of 10000\n",
+      "Cluster 011 :    [########################################] 100.0% of 10000\n",
+      "Cluster 012 :    [########################################] 100.0% of 10000\n",
+      "Cluster 013 :    [########################################] 100.0% of 10000\n",
+      "Cluster 014 :    [########################################] 100.0% of 10000\n",
+      "Cluster 015 :    [########################################] 100.0% of 10000\n",
+      "Cluster 016 :    [########################################] 100.0% of 10000\n",
+      "Cluster 017 :    [########################################] 100.0% of 10000\n",
+      "Cluster 018 :    [########################################] 100.0% of 10000\n",
+      "Cluster 019 :    [########################################] 100.0% of 10000\n",
+      "Cluster 020 :    [##########------------------------------]  25.0% of 10000\r"
      ]
     },
     {
@@ -400,8 +400,8 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Duration     :  0:05:06\n",
-      "Size         :  14.8 Go\n"
+      "Duration     :  0:50:59\n",
+      "Size         :  139.1 Go\n"
      ]
     }
    ],
@@ -444,8 +444,8 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "End time is : Monday 4 January 2021, 23:50:54\n",
-      "Duration is : 00:05:08 166ms\n",
+      "End time is : Wednesday 6 January 2021, 11:46:31\n",
+      "Duration is : 00:51:00 416ms\n",
       "This notebook ends here\n"
      ]
     }
diff --git a/VAE/08-VAE-with-CelebA.ipynb b/VAE/08-VAE-with-CelebA.ipynb
index b95494692b8ba4d7e66fa4be79397c764ed85483..01abdcd632c1d98b2f1b5d66b43af09fbb01cd92 100644
--- a/VAE/08-VAE-with-CelebA.ipynb
+++ b/VAE/08-VAE-with-CelebA.ipynb
@@ -191,8 +191,6 @@
     "import fidle.pwk as pwk\n",
     "\n",
     "run_dir = './run/CelebA-s.001'                  # Output directory\n",
-    "run_dir = os.getenv('FIDLE_RUN_DIR', run_dir)   # Can be override by an env var (batch)\n",
-    "\n",
     "datasets_dir = pwk.init('VAE8', run_dir)\n",
     "\n",
     "VAE.about()\n",
@@ -276,8 +274,9 @@
     }
    ],
    "source": [
-    "# ---- Used for continous integration - Just forget this 2 lines\n",
+    "# ---- Used for continous integration or batch mode - Just forget this 2 lines\n",
     "#\n",
+    "scale        = pwk.override('scale',        scale)\n",
     "image_size   = pwk.override('image_size',   image_size)\n",
     "enhanced_dir = pwk.override('enhanced_dir', enhanced_dir)\n",
     "\n",
diff --git a/VAE/batch_slurm.sh b/VAE/batch_slurm.sh
index bbdc1f5ccdd4502acc84aaf627d200f5af786c61..8a66cdde344276848a8973be63e5a06f671bf05d 100755
--- a/VAE/batch_slurm.sh
+++ b/VAE/batch_slurm.sh
@@ -35,8 +35,13 @@ NOTEBOOK_DIR="$WORK/fidle/VAE"
 # NOTEBOOK_SRC="01-VAE-with-MNIST.ipynb"
 # FIDLE_RUN_DIR="./run/MNIST.$SLURM_JOB_ID"
 
+
 NOTEBOOK_SRC="08-VAE-with-CelebA.ipynb"
-FIDLE_RUN_DIR="./run/CelebA.$SLURM_JOB_ID"
+
+FIDLE_OVERRIDE_VAE8_run_dir="./run/CelebA.$SLURM_JOB_ID"
+FIDLE_OVERRIDE_VAE8_scale="0.05"
+FIDLE_OVERRIDE_VAE8_image_size="(128,128)"
+FIDLE_OVERRIDE_VAE8_enhanced_dir='{datasets_dir}/celeba/enhanced'
 
 NOTEBOOK_OUT="${NOTEBOOK_SRC%.*}==${SLURM_JOB_ID}==.ipynb"
 
@@ -52,7 +57,7 @@ echo '------------------------------------------------------------'
 echo "Notebook dir  : $NOTEBOOK_DIR"
 echo "Notebook src  : $NOTEBOOK_SRC"
 echo "Notebook out  : $NOTEBOOK_OUT"
-echo "Run dir       : $FIDLE_RUN_DIR"
+echo "Run dir       : $FIDLE_OVERRIDE_VAE8_run_dir"
 echo "Environment   : $MODULE_ENV"
 echo '------------------------------------------------------------'
 
diff --git a/fidle/config.py b/fidle/config.py
index e0b254ab71d0cc1bac3f24e1cb3bcd1ccf06e2ae..922ccea164e06848978fcd9d7c921cb9f0826864 100644
--- a/fidle/config.py
+++ b/fidle/config.py
@@ -43,37 +43,32 @@ FINISHED_FILE  = '../fidle/log/finished.json'
 #
 CI_REPORT      = '../fidle/log/ci_report.html'
 
-# ---- Defaul mode (free|full|smart) --------------------------------
-#      Overrided by env : FIDLE_RUNNING_MODE
-#
-DEFAULT_RUNNING_MODE = 'none'
-
-# ---- CI Override parameters --------------------------------------
+# ---- CI Override parameters examples -----------------------------
 #
 # ---- Preparation of GTSRB dataset
-GTSRB1_smart_scale      = 0.1
-GTSRB1_smart_output_dir = './data'
-GTSRB1_full_scale       = 1
-GTSRB1_full_output_dir  = '{datasets_dir}/GTSRB/enhanced'
+# FIDLE_OVERRIDE_GTSRB1_scale      = 0.1
+# FIDLE_OVERRIDE_GTSRB1_output_dir = './data'
+# FIDLE_OVERRIDE_GTSRB1_scale       = 1
+# FIDLE_OVERRIDE_GTSRB1_output_dir  = '{datasets_dir}/GTSRB/enhanced'
 
-# ---- Preparation of CelebA dataset
-VAE6_smart_scale        = 0.2
-VAE6_smart_image_size   = (128,128)
-VAE6_smart_output_dir   = './data'
-VAE6_full_scale         = 1
-VAE6_full_image_size    = (192,160)
-VAE6_full_output_dir    = '{datasets_dir}/celeba/enhanced'
+# # ---- Preparation of CelebA dataset
+# FIDLE_OVERRIDE_VAE6_scale        = 0.2
+# FIDLE_OVERRIDE_VAE6_image_size   = (128,128)
+# FIDLE_OVERRIDE_VAE6_output_dir   = './data'
+# FIDLE_OVERRIDE_VAE6_scale         = 1
+# FIDLE_OVERRIDE_VAE6_image_size    = (192,160)
+# FIDLE_OVERRIDE_VAE6_output_dir    = '{datasets_dir}/celeba/enhanced'
 
-# ---- Check CelebA dataset
-VAE7_smart_image_size   = (128,128)
-VAE7_smart_enhanced_dir = './data'
-VAE7_full_image_size    = (192,160)
-VAE7_full_enhanced_dir  = '{datasets_dir}/celeba/enhanced'
+# # ---- Check CelebA dataset
+# FIDLE_OVERRIDE_VAE7_image_size   = (128,128)
+# FIDLE_OVERRIDE_VAE7_enhanced_dir = './data'
+# FIDLE_OVERRIDE_VAE7_image_size    = (192,160)
+# FIDLE_OVERRIDE_VAE7_enhanced_dir  = '{datasets_dir}/celeba/enhanced'
 
-# ---- VAE with CelebA
-VAE8_smart_scale        = 1.
-VAE8_smart_image_size   = (128,128)
-VAE8_smart_enhanced_dir = './data'
-VAE8_full_scale         = 1.
-VAE8_full_image_size    = (192,160)
-VAE8_full_enhanced_dir  = '{datasets_dir}/celeba/enhanced'
+# # ---- VAE with CelebA
+# FIDLE_OVERRIDE_VAE8_scale        = 1.
+# FIDLE_OVERRIDE_VAE8_image_size   = (128,128)
+# FIDLE_OVERRIDE_VAE8_enhanced_dir = './data'
+# FIDLE_OVERRIDE_VAE8_scale         = 1.
+# FIDLE_OVERRIDE_VAE8_image_size    = (192,160)
+# FIDLE_OVERRIDE_VAE8_enhanced_dir  = '{datasets_dir}/celeba/enhanced'
diff --git a/fidle/log/finished.json b/fidle/log/finished.json
index bd41e173c1727945cf43a3ea1cca26daee536b5b..06288e3c67f591b74aae8360d7d04a6c91d38b5b 100644
--- a/fidle/log/finished.json
+++ b/fidle/log/finished.json
@@ -126,10 +126,10 @@
         "duration": "00:00:10 061ms"
     },
     "VAE6": {
-        "path": "/home/pjluc/dev/fidle/VAE",
-        "start": "Monday 4 January 2021, 23:45:46",
-        "end": "Monday 4 January 2021, 23:50:54",
-        "duration": "00:05:08 166ms"
+        "path": "/gpfsdswork/projects/rech/mlh/uja62cb/fidle/VAE",
+        "start": "Wednesday 6 January 2021, 10:55:30",
+        "end": "Wednesday 6 January 2021, 11:46:31",
+        "duration": "00:51:00 416ms"
     },
     "GTS1": {
         "path": "/home/pjluc/dev/fidle/GTSRB",
@@ -138,10 +138,10 @@
         "duration": "00:03:05 030ms"
     },
     "GTSRB1": {
-        "path": "/home/pjluc/dev/fidle/GTSRB",
-        "start": "Thursday 31 December 2020, 12:34:29",
-        "end": "Thursday 31 December 2020, 12:36:22",
-        "duration": "00:01:53 128ms"
+        "path": "/gpfsdswork/projects/rech/mlh/uja62cb/fidle/GTSRB",
+        "start": "Wednesday 6 January 2021, 11:06:40",
+        "end": "",
+        "duration": "Unfinished..."
     },
     "VAE7": {
         "path": "/home/pjluc/dev/fidle/VAE",
@@ -151,7 +151,7 @@
     },
     "VAE8": {
         "path": "/gpfsdswork/projects/rech/mlh/uja62cb/fidle/VAE",
-        "start": "Wednesday 6 January 2021, 01:06:17",
+        "start": "Wednesday 6 January 2021, 12:17:53",
         "end": "",
         "duration": "Unfinished..."
     },
diff --git a/fidle/pwk.py b/fidle/pwk.py
index 83a393a652f7d656d843a46dfff9192c0eb127a3..fea2934e6bdcf9dbe16a2f4ad56ca4ced951af50 100644
--- a/fidle/pwk.py
+++ b/fidle/pwk.py
@@ -57,7 +57,6 @@ _chrono_stop  = None
 def init(name=None, run_dir='./run'):
     global notebook_id
     global datasets_dir
-    global running_mode
     global _start_time
     
     # ---- Parameters from config.py
@@ -71,10 +70,6 @@ def init(name=None, run_dir='./run'):
     matplotlib.style.use(mplstyle)
     load_cssfile(cssfile)
     
-    # ---- Create subdirs
-    #
-    mkdir(run_dir)
-    
     # ---- datasets location
     #
     datasets_dir = os.getenv('FIDLE_DATASETS_DIR', False)
@@ -82,11 +77,11 @@ def init(name=None, run_dir='./run'):
         error_datasets_not_found()
     # Resolve tilde...
     datasets_dir=os.path.expanduser(datasets_dir)
-    
-    # ---- Running mode
+        
+    # ---- run_dir
     #
-    running_mode = os.getenv('FIDLE_RUNNING_MODE', config.DEFAULT_RUNNING_MODE)
-    running_mode = running_mode.lower()
+    run_dir = override('run_dir',run_dir)
+    mkdir(run_dir)
     
     # ---- Update Keras cache
     #
@@ -106,7 +101,6 @@ def init(name=None, run_dir='./run'):
     print('Keras version        :', tf.keras.__version__)
     print('Datasets dir         :', datasets_dir)
     print('Run dir              :', run_dir)
-    print('CI running mode      :', running_mode)
     print('Update keras cache   :', updated)
 
     # ---- Save figs or not
@@ -162,19 +156,41 @@ def error_datasets_not_found():
 # -------------------------------------------------------------
 # param_override
 # -------------------------------------------------------------
-# 
+# Try to override a given parameter with an environment variable.
+#
 def override(name, value):
-    # ---- Pas de mode override actif
-    if running_mode not in ['smart','full']: return value
-    # ---- Get entry name in config
-    entry=f'{notebook_id}_{running_mode}_{name}'
-    # ---- Get value
-    assert hasattr(config, entry), f'Override error : Cannot find entry [{entry}] in config.'
-    new_value=getattr(config,entry)
-    if isinstance(new_value, str) : 
-        new_value=new_value.format(datasets_dir=datasets_dir, notebook_id=notebook_id)
+    '''
+    Try to override a given parameter (name,value) with an environment variable.
+    Env variable name is : FIDLE_OVERRIDE_<NOTEBOOK-ID>_<NAME>
+    If no env variable is available, return the given value.
+    If type is str, substitution is done with notebook_id and datasets_dir
+    params:
+       name : parameter name
+       value: parameter value
+    return :
+       eval(env variable) if the env variable exists, otherwise the given value
+    '''
+    # ---- Environment variable name
+    #
+    env_name  = f'FIDLE_OVERRIDE_{notebook_id}_{name}'
+    env_value = os.environ.get(env_name) 
+    
+    # ---- Doesn't exist ?
+    #
+    if env_value is None:
+        return value
+    
+    # ---- Exist
+    #
+    if isinstance(value, str) : 
+        new_value = env_value.format(datasets_dir=datasets_dir, notebook_id=notebook_id)
+        
+    if type(value) in [ tuple, int, float]:
+        new_value = eval(env_value)
+    
     # ---- Return 
-    print(f'Override : running mode is [{running_mode}] Parameter [{name}={value}] set to [{new_value}]')
+    #
+    print(f'Override : Parameter [{name}={value}] set to [{new_value}]')
     return new_value