Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • daconcea/fidle
  • bossardl/fidle
  • Julie.Remenant/fidle
  • abijolao/fidle
  • monsimau/fidle
  • karkars/fidle
  • guilgautier/fidle
  • cailletr/fidle
  • talks/fidle
9 results
Show changes
{
"LINR1": {
"id": "LINR1",
"dirname": "LinearReg",
"basename": "01-Linear-Regression.ipynb",
"title": "Linear regression with direct resolution",
"description": "Direct determination of linear regression "
},
"GRAD1": {
"id": "GRAD1",
"dirname": "LinearReg",
"basename": "02-Gradient-descent.ipynb",
"title": "Linear regression with gradient descent",
"description": "An example of gradient descent in the simple case of a linear regression."
},
"POLR1": {
"id": "POLR1",
"dirname": "LinearReg",
"basename": "03-Polynomial-Regression.ipynb",
"title": "Complexity Syndrome",
"description": "Illustration of the problem of complexity with the polynomial regression"
},
"LOGR1": {
"id": "LOGR1",
"dirname": "LinearReg",
"basename": "04-Logistic-Regression.ipynb",
"title": "Logistic regression, in pure Tensorflow",
"description": "Logistic Regression with Mini-Batch Gradient Descent using pure TensorFlow. "
},
"PER57": {
"id": "PER57",
"dirname": "IRIS",
"basename": "01-Simple-Perceptron.ipynb",
"title": "Perceptron Model 1957",
"description": "A simple perceptron, with the IRIS dataset."
},
"BHP1": {
"id": "BHP1",
"dirname": "BHPD",
"basename": "01-DNN-Regression.ipynb",
"title": "Regression with a Dense Network (DNN)",
"description": "A Simple regression with a Dense Neural Network (DNN) - BHPD dataset"
},
"BHP2": {
"id": "BHP2",
"dirname": "BHPD",
"basename": "02-DNN-Regression-Premium.ipynb",
"title": "Regression with a Dense Network (DNN) - Advanced code",
"description": "More advanced example of DNN network code - BHPD dataset"
},
"MNIST1": {
"id": "MNIST1",
"dirname": "MNIST",
"basename": "01-DNN-MNIST.ipynb",
"title": "Simple classification with DNN",
"description": "Example of classification with a fully connected neural network"
},
"GTS1": {
"id": "GTS1",
"dirname": "GTSRB",
"basename": "01-Preparation-of-data.ipynb",
"title": "CNN with GTSRB dataset - Data analysis and preparation",
"description": "Episode 1 : Data analysis and creation of a usable dataset"
},
"GTS2": {
"id": "GTS2",
"dirname": "GTSRB",
"basename": "02-First-convolutions.ipynb",
"title": "CNN with GTSRB dataset - First convolutions",
"description": "Episode 2 : First convolutions and first results"
},
"GTS3": {
"id": "GTS3",
"dirname": "GTSRB",
"basename": "03-Tracking-and-visualizing.ipynb",
"title": "CNN with GTSRB dataset - Monitoring ",
"description": "Episode 3 : Monitoring and analysing training, managing checkpoints"
},
"GTS4": {
"id": "GTS4",
"dirname": "GTSRB",
"basename": "04-Data-augmentation.ipynb",
"title": "CNN with GTSRB dataset - Data augmentation ",
"description": "Episode 4 : Improving the results with data augmentation"
},
"GTS5": {
"id": "GTS5",
"dirname": "GTSRB",
"basename": "05-Full-convolutions.ipynb",
"title": "CNN with GTSRB dataset - Full convolutions ",
"description": "Episode 5 : A lot of models, a lot of datasets and a lot of results."
},
"GTS6": {
"id": "GTS6",
"dirname": "GTSRB",
"basename": "06-Notebook-as-a-batch.ipynb",
"title": "CNN with GTSRB dataset - Full convolutions as a batch",
"description": "Episode 6 : Run Full convolution notebook as a batch"
},
"GTS7": {
"id": "GTS7",
"dirname": "GTSRB",
"basename": "07-Show-report.ipynb",
"title": "CNN with GTSRB dataset - Show reports",
"description": "Episode 7 : Displaying the reports of the different jobs"
},
"TSB1": {
"id": "TSB1",
"dirname": "GTSRB",
"basename": "99-Scripts-Tensorboard.ipynb",
"title": "Tensorboard with/from Jupyter ",
"description": "4 ways to use Tensorboard from the Jupyter environment"
},
"IMDB1": {
"id": "IMDB1",
"dirname": "IMDB",
"basename": "01-Embedding-Keras.ipynb",
"title": "Text embedding with IMDB",
"description": "A very classical example of word embedding for text classification (sentiment analysis)"
},
"IMDB2": {
"id": "IMDB2",
"dirname": "IMDB",
"basename": "02-Prediction.ipynb",
"title": "Text embedding with IMDB - Reloaded",
"description": "Example of reusing a previously saved model"
},
"IMDB3": {
"id": "IMDB3",
"dirname": "IMDB",
"basename": "03-LSTM-Keras.ipynb",
"title": "Text embedding/LSTM model with IMDB",
"description": "Still the same problem, but with a network combining embedding and LSTM"
},
"SYNOP1": {
"id": "SYNOP1",
"dirname": "SYNOP",
"basename": "01-Preparation-of-data.ipynb",
"title": "Time series with RNN - Preparation of data",
"description": "Episode 1 : Data analysis and creation of a usable dataset"
},
"SYNOP2": {
"id": "SYNOP2",
"dirname": "SYNOP",
"basename": "02-First-predictions.ipynb",
"title": "Time series with RNN - Try a prediction",
"description": "Episode 2 : Training session and first predictions"
},
"SYNOP3": {
"id": "SYNOP3",
"dirname": "SYNOP",
"basename": "03-12h-predictions.ipynb",
"title": "Time series with RNN - 12h predictions",
"description": "Episode 3: Attempt to predict in the longer term "
},
"VAE1": {
"id": "VAE1",
"dirname": "VAE",
"basename": "01-VAE-with-MNIST.nbconvert.ipynb",
"title": "Variational AutoEncoder (VAE) with MNIST",
"description": "Episode 1 : Model construction and Training"
},
"VAE2": {
"id": "VAE2",
"dirname": "VAE",
"basename": "02-VAE-with-MNIST-post.ipynb",
"title": "Variational AutoEncoder (VAE) with MNIST - Analysis",
"description": "Episode 2 : Exploring our latent space"
},
"VAE3": {
"id": "VAE3",
"dirname": "VAE",
"basename": "03-About-CelebA.ipynb",
"title": "About the CelebA dataset",
"description": "Episode 3\u00a0: About the CelebA dataset, a more fun dataset ;-)"
},
"VAE4": {
"id": "VAE4",
"dirname": "VAE",
"basename": "04-Prepare-CelebA-datasets.ipynb",
"title": "Preparation of the CelebA dataset",
"description": "Episode 4\u00a0: Preparation of a clustered dataset, batchable"
},
"VAE5": {
"id": "VAE5",
"dirname": "VAE",
"basename": "05-Check-CelebA.ipynb",
"title": "Checking the clustered CelebA dataset",
"description": "Episode 5\u00a0:\tChecking the clustered dataset"
},
"VAE6": {
"id": "VAE6",
"dirname": "VAE",
"basename": "06-VAE-with-CelebA-s.nbconvert.ipynb",
"title": "Variational AutoEncoder (VAE) with CelebA (small)",
"description": "Episode 6\u00a0: Variational AutoEncoder (VAE) with CelebA (small res.)"
},
"VAE7": {
"id": "VAE7",
"dirname": "VAE",
"basename": "07-VAE-with-CelebA-m.nbconvert.ipynb",
"title": "Variational AutoEncoder (VAE) with CelebA (medium)",
"description": "Episode 7\u00a0: Variational AutoEncoder (VAE) with CelebA (medium res.)"
},
"VAE8": {
"id": "VAE8",
"dirname": "VAE",
"basename": "08-VAE-withCelebA-post.ipynb",
"title": "Variational AutoEncoder (VAE) with CelebA - Analysis",
"description": "Episode 8\u00a0: Exploring latent space of our trained models"
},
"ACTF1": {
"id": "ACTF1",
"dirname": "Misc",
"basename": "Activation-Functions.ipynb",
"title": "Activation functions",
"description": "Some activation functions, with their derivatives."
},
"NP1": {
"id": "NP1",
"dirname": "Misc",
"basename": "Numpy.ipynb",
"title": "A short introduction to Numpy",
"description": "Numpy is an essential tool for the Scientific Python."
}
}
\ No newline at end of file
{
"LINR1": {
"dirname": "LinearReg",
"basename": "01-Linear-Regression.ipynb",
"id": "LINR1",
"title": "Linear regression with direct resolution",
"description": "Direct determination of linear regression "
},
"GRAD1": {
"dirname": "LinearReg",
"basename": "02-Gradient-descent.ipynb",
"id": "GRAD1",
"title": "Linear regression with gradient descent",
"description": "An example of gradient descent in the simple case of a linear regression."
},
"POLR1": {
"dirname": "LinearReg",
"basename": "03-Polynomial-Regression.ipynb",
"id": "POLR1",
"title": "Complexity Syndrome",
"description": "Illustration of the problem of complexity with the polynomial regression"
},
"LOGR1": {
"dirname": "LinearReg",
"basename": "04-Logistic-Regression.ipynb",
"id": "LOGR1",
"title": "Logistic regression, in pure Tensorflow",
"description": "Logistic Regression with Mini-Batch Gradient Descent using pure TensorFlow. "
},
"PER57": {
"dirname": "IRIS",
"basename": "01-Simple-Perceptron.ipynb",
"id": "PER57",
"title": "Perceptron Model 1957",
"description": "A simple perceptron, with the IRIS dataset."
},
"BHP1": {
"dirname": "BHPD",
"basename": "01-DNN-Regression.ipynb",
"id": "BHP1",
"title": "Regression with a Dense Network (DNN)",
"description": "A Simple regression with a Dense Neural Network (DNN) - BHPD dataset"
},
"BHP2": {
"dirname": "BHPD",
"basename": "02-DNN-Regression-Premium.ipynb",
"id": "BHP2",
"title": "Regression with a Dense Network (DNN) - Advanced code",
"description": "More advanced example of DNN network code - BHPD dataset"
},
"MNIST1": {
"dirname": "MNIST",
"basename": "01-DNN-MNIST.ipynb",
"id": "MNIST1",
"title": "Simple classification with DNN",
"description": "Example of classification with a fully connected neural network"
},
"GTS1": {
"dirname": "GTSRB",
"basename": "01-Preparation-of-data.ipynb",
"id": "GTS1",
"title": "CNN with GTSRB dataset - Data analysis and preparation",
"description": "Episode 1 : Data analysis and creation of a usable dataset"
},
"GTS2": {
"dirname": "GTSRB",
"basename": "02-First-convolutions.ipynb",
"id": "GTS2",
"title": "CNN with GTSRB dataset - First convolutions",
"description": "Episode 2 : First convolutions and first results"
},
"GTS3": {
"dirname": "GTSRB",
"basename": "03-Tracking-and-visualizing.ipynb",
"id": "GTS3",
"title": "CNN with GTSRB dataset - Monitoring ",
"description": "Episode 3 : Monitoring and analysing training, managing checkpoints"
},
"GTS4": {
"dirname": "GTSRB",
"basename": "04-Data-augmentation.ipynb",
"id": "GTS4",
"title": "CNN with GTSRB dataset - Data augmentation ",
"description": "Episode 4 : Improving the results with data augmentation"
},
"GTS5": {
"dirname": "GTSRB",
"basename": "05-Full-convolutions.ipynb",
"id": "GTS5",
"title": "CNN with GTSRB dataset - Full convolutions ",
"description": "Episode 5 : A lot of models, a lot of datasets and a lot of results."
},
"GTS6": {
"dirname": "GTSRB",
"basename": "06-Notebook-as-a-batch.ipynb",
"id": "GTS6",
"title": "CNN with GTSRB dataset - Full convolutions as a batch",
"description": "Episode 6 : Run Full convolution notebook as a batch"
},
"GTS7": {
"dirname": "GTSRB",
"basename": "07-Show-report.ipynb",
"id": "GTS7",
"title": "CNN with GTSRB dataset - Show reports",
"description": "Episode 7 : Displaying the reports of the different jobs"
},
"TSB1": {
"dirname": "GTSRB",
"basename": "99-Scripts-Tensorboard.ipynb",
"id": "TSB1",
"title": "Tensorboard with/from Jupyter ",
"description": "4 ways to use Tensorboard from the Jupyter environment"
},
"IMDB1": {
"dirname": "IMDB",
"basename": "01-Embedding-Keras.ipynb",
"id": "IMDB1",
"title": "Text embedding with IMDB",
"description": "A very classical example of word embedding for text classification (sentiment analysis)"
},
"IMDB2": {
"dirname": "IMDB",
"basename": "02-Prediction.ipynb",
"id": "IMDB2",
"title": "Text embedding with IMDB - Reloaded",
"description": "Example of reusing a previously saved model"
},
"IMDB3": {
"dirname": "IMDB",
"basename": "03-LSTM-Keras.ipynb",
"id": "IMDB3",
"title": "Text embedding/LSTM model with IMDB",
"description": "Still the same problem, but with a network combining embedding and LSTM"
},
"SYNOP1": {
"dirname": "SYNOP",
"basename": "01-Preparation-of-data.ipynb",
"id": "SYNOP1",
"title": "Time series with RNN - Preparation of data",
"description": "Episode 1 : Data analysis and creation of a usable dataset"
},
"SYNOP2": {
"dirname": "SYNOP",
"basename": "02-First-predictions.ipynb",
"id": "SYNOP2",
"title": "Time series with RNN - Try a prediction",
"description": "Episode 2 : Training session and first predictions"
},
"SYNOP3": {
"dirname": "SYNOP",
"basename": "03-12h-predictions.ipynb",
"id": "SYNOP3",
"title": "Time series with RNN - 12h predictions",
"description": "Episode 3: Attempt to predict in the longer term "
},
"VAE1": {
"dirname": "VAE",
"basename": "01-VAE-with-MNIST.nbconvert.ipynb",
"id": "VAE1",
"title": "Variational AutoEncoder (VAE) with MNIST",
"description": "Episode 1 : Model construction and Training"
},
"VAE2": {
"dirname": "VAE",
"basename": "02-VAE-with-MNIST-post.ipynb",
"id": "VAE2",
"title": "Variational AutoEncoder (VAE) with MNIST - Analysis",
"description": "Episode 2 : Exploring our latent space"
},
"VAE3": {
"dirname": "VAE",
"basename": "03-About-CelebA.ipynb",
"id": "VAE3",
"title": "About the CelebA dataset",
"description": "Episode 3\u00a0: About the CelebA dataset, a more fun dataset ;-)"
},
"VAE4": {
"dirname": "VAE",
"basename": "04-Prepare-CelebA-datasets.ipynb",
"id": "VAE4",
"title": "Preparation of the CelebA dataset",
"description": "Episode 4\u00a0: Preparation of a clustered dataset, batchable"
},
"VAE5": {
"dirname": "VAE",
"basename": "05-Check-CelebA.ipynb",
"id": "VAE5",
"title": "Checking the clustered CelebA dataset",
"description": "Episode 5\u00a0:\tChecking the clustered dataset"
},
"VAE6": {
"dirname": "VAE",
"basename": "06-VAE-with-CelebA-s.nbconvert.ipynb",
"id": "VAE6",
"title": "Variational AutoEncoder (VAE) with CelebA (small)",
"description": "Episode 6\u00a0: Variational AutoEncoder (VAE) with CelebA (small res.)"
},
"VAE7": {
"dirname": "VAE",
"basename": "07-VAE-with-CelebA-m.nbconvert.ipynb",
"id": "VAE7",
"title": "Variational AutoEncoder (VAE) with CelebA (medium)",
"description": "Episode 7\u00a0: Variational AutoEncoder (VAE) with CelebA (medium res.)"
},
"VAE8": {
"dirname": "VAE",
"basename": "08-VAE-withCelebA-post.ipynb",
"id": "VAE8",
"title": "Variational AutoEncoder (VAE) with CelebA - Analysis",
"description": "Episode 8\u00a0: Exploring latent space of our trained models"
},
"ACTF1": {
"dirname": "Misc",
"basename": "Activation-Functions.ipynb",
"id": "ACTF1",
"title": "Activation functions",
"description": "Some activation functions, with their derivatives."
},
"NP1": {
"dirname": "Misc",
"basename": "Numpy.ipynb",
"id": "NP1",
"title": "A short introduction to Numpy",
"description": "Numpy is an essential tool for the Scientific Python."
}
}
\ No newline at end of file
<html>
<head><title>FIDLE - CI Report</title></head>
<style>
body{
font-family: sans-serif;
}
a{
color: SteelBlue;
text-decoration:none;
}
table{
border-collapse : collapse;
font-size : 80%
}
td{
border-style: solid;
border-width: thin;
border-color: lightgrey;
padding: 5px;
}
.header{ padding:20px 0px 0px 30px; }
.result{ padding:10px 0px 20px 30px; }
</style>
<body>
<br>Hi,
<p>Below is the result of the continuous integration tests of the Fidle project:</p>
<div class="header"><b>Report date :</b> Tuesday 15 December 2020, 22:06:09</div>
<div class="result">
<style type="text/css" >
#T_5acb85fa_3f19_11eb_8e56_19607a97f796 td {
font-size: 110%;
text-align: left;
} #T_5acb85fa_3f19_11eb_8e56_19607a97f796 th {
font-size: 110%;
text-align: left;
}</style><table id="T_5acb85fa_3f19_11eb_8e56_19607a97f796" ><thead> <tr> <th class="col_heading level0 col0" >id</th> <th class="col_heading level0 col1" >repo</th> <th class="col_heading level0 col2" >name</th> <th class="col_heading level0 col3" >start</th> <th class="col_heading level0 col4" >end</th> <th class="col_heading level0 col5" >duration</th> </tr></thead><tbody>
<tr>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row0_col0" class="data row0 col0" ><a href="../LinearReg/01-Linear-Regression.ipynb">LINR1</a></td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row0_col1" class="data row0 col1" >LinearReg</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row0_col2" class="data row0 col2" ><a href="../LinearReg/01-Linear-Regression.ipynb"><b>01-Linear-Regression.ipynb</b></a></td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row0_col3" class="data row0 col3" >Tuesday 15 December 2020, 14:04:04</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row0_col4" class="data row0 col4" >Tuesday 15 December 2020, 14:04:04</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row0_col5" class="data row0 col5" >00:00:00 295ms</td>
</tr>
<tr>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row1_col0" class="data row1 col0" ><a href="../LinearReg/02-Gradient-descent.ipynb">GRAD1</a></td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row1_col1" class="data row1 col1" >LinearReg</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row1_col2" class="data row1 col2" ><a href="../LinearReg/02-Gradient-descent.ipynb"><b>02-Gradient-descent.ipynb</b></a></td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row1_col3" class="data row1 col3" >Tuesday 15 December 2020, 15:05:11</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row1_col4" class="data row1 col4" >Tuesday 15 December 2020, 15:05:14</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row1_col5" class="data row1 col5" >00:00:03 120ms</td>
</tr>
<tr>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row2_col0" class="data row2 col0" ><a href="../LinearReg/03-Polynomial-Regression.ipynb">POLR1</a></td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row2_col1" class="data row2 col1" >LinearReg</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row2_col2" class="data row2 col2" ><a href="../LinearReg/03-Polynomial-Regression.ipynb"><b>03-Polynomial-Regression.ipynb</b></a></td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row2_col3" class="data row2 col3" >Tuesday 15 December 2020, 15:05:27</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row2_col4" class="data row2 col4" >Tuesday 15 December 2020, 15:05:28</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row2_col5" class="data row2 col5" >00:00:01 686ms</td>
</tr>
<tr>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row3_col0" class="data row3 col0" ><a href="../LinearReg/04-Logistic-Regression.ipynb">LOGR1</a></td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row3_col1" class="data row3 col1" >LinearReg</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row3_col2" class="data row3 col2" ><a href="../LinearReg/04-Logistic-Regression.ipynb"><b>04-Logistic-Regression.ipynb</b></a></td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row3_col3" class="data row3 col3" >Tuesday 15 December 2020, 15:05:42</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row3_col4" class="data row3 col4" >Tuesday 15 December 2020, 15:06:44</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row3_col5" class="data row3 col5" >00:01:02 112ms</td>
</tr>
<tr>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row4_col0" class="data row4 col0" ><a href="../IRIS/01-Simple-Perceptron.ipynb">PER57</a></td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row4_col1" class="data row4 col1" >IRIS</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row4_col2" class="data row4 col2" ><a href="../IRIS/01-Simple-Perceptron.ipynb"><b>01-Simple-Perceptron.ipynb</b></a></td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row4_col3" class="data row4 col3" >Tuesday 15 December 2020, 21:49:41</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row4_col4" class="data row4 col4" >Tuesday 15 December 2020, 21:49:41</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row4_col5" class="data row4 col5" >00:00:00 203ms</td>
</tr>
<tr>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row5_col0" class="data row5 col0" ><a href="../BHPD/01-DNN-Regression.ipynb">BHP1</a></td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row5_col1" class="data row5 col1" >BHPD</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row5_col2" class="data row5 col2" ><a href="../BHPD/01-DNN-Regression.ipynb"><b>01-DNN-Regression.ipynb</b></a></td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row5_col3" class="data row5 col3" >Tuesday 15 December 2020, 21:51:22</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row5_col4" class="data row5 col4" >Tuesday 15 December 2020, 21:51:32</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row5_col5" class="data row5 col5" >00:00:10 080ms</td>
</tr>
<tr>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row6_col0" class="data row6 col0" ><a href="../BHPD/02-DNN-Regression-Premium.ipynb">BHP2</a></td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row6_col1" class="data row6 col1" >BHPD</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row6_col2" class="data row6 col2" ><a href="../BHPD/02-DNN-Regression-Premium.ipynb"><b>02-DNN-Regression-Premium.ipynb</b></a></td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row6_col3" class="data row6 col3" >Tuesday 15 December 2020, 22:05:15</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row6_col4" class="data row6 col4" >Tuesday 15 December 2020, 22:05:26</td>
<td id="T_5acb85fa_3f19_11eb_8e56_19607a97f796row6_col5" class="data row6 col5" >00:00:11 601ms</td>
</tr>
</tbody></table>
</div>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 140.2164 40.848" width="80px"><title>00-Fidle-logo-01</title><g id="Calque_2" data-name="Calque 2"><g id="Calque_4" data-name="Calque 4"><path d="M46.1913,31.74a12.9222,12.9222,0,0,0,5.2755-1.77,6.4763,6.4763,0,0,1,2.3629-.9239,14.6364,14.6364,0,0,0-5.7616-16.4446,17.0565,17.0565,0,0,0-11.8732-2.0051c-4.1719.69-8.4957,3.8461-10.189,5.2622-1.0189.8536-13.1385,12.3424-18.1936,10.0527-3.42-1.5492,2.6862-7.1873-.1144-12.3393a.2236.2236,0,0,0-.373-.0248c-1.4257,1.9233-2.8193,4.2317-4.7179,3.1953-.8482-.4632-1.6116-1.9422-2.2-2.8775A.2216.2216,0,0,0,0,13.9917,23.35,23.35,0,0,0,5.87,28.2417a35.3776,35.3776,0,0,0,24.34,12.518c5.3439.5321,18.0193-1.1527,23.0835-10.2646a12.7681,12.7681,0,0,0-1.2217.6066,14.2177,14.2177,0,0,1-5.7629,1.9167c-.1761.0163-.3511.0236-.5261.0236a10.1733,10.1733,0,0,1-5.7446-2.303,1.0764,1.0764,0,1,1,.8227-1.0443c0,.0176-.0042.0339-.0054.0515C41.8966,30.5423,44.0669,31.9474,46.1913,31.74ZM30.0385,36.5091a19.6093,19.6093,0,0,1-4.6162.8385c-1.0425.0006-1.476-.2954-1.6824-.7392-.5431-1.1678,1.4136-2.8563,3.1493-4.0677a.6418.6418,0,1,1,.7343,1.0528,10.5781,10.5781,0,0,0-2.651,2.4368c.339.0732,1.44.12,4.733-.7616a.6422.6422,0,0,1,.333,1.24Zm14.87-15.6442a2.4512,2.4512,0,0,1,2.38,2.3617,1.6015,1.6015,0,1,0-1.4179,2.34,1.6573,1.6573,0,0,0,.2973-.03,2.28,2.28,0,0,1-1.2593.3875,2.5337,2.5337,0,0,1,0-5.06ZM36.6423,4.436A1.2835,1.2835,0,0,0,37.1466,6.18c.6211.342,1.9294-.402,2.7231.7071.4122.5763-.8627-2.6129-1.4839-2.9556A1.2827,1.2827,0,0,0,36.6423,4.436Zm6.5389.1374c-1.5995.9378-1.8961,4.8154-1.4838,4.2391a7.2989,7.2989,0,0,1,2.7231-1.9906,1.2837,1.2837,0,0,0-1.2393-2.2485ZM41.5587.2981c-.8179.9462-.2579,3.4-.1114,2.95a5.2169,5.2169,0,0,1,1.3174-1.8537A.8415.8415,0,0,0,42.7441.2054.8332.8332,0,0,0,41.5587.2981Z" style="fill:#e12229"/><path d="M65.6671,13.7493H77.3946V15.158H67.3223v9.4379h9.2271v1.4087H67.3223v11.481H65.6671Z" style="fill:#808285"/><path d="M83.5909,13.7493V37.4856H81.9356V13.7493Z" 
style="fill:#808285"/><path d="M89.3658,14.0662a39.0353,39.0353,0,0,1,6.0576-.4932c4.3316,0,7.607,1.1621,9.5791,3.24a11.2256,11.2256,0,0,1,2.958,8.2056,13.0738,13.0738,0,0,1-3.0991,9.0156c-2.1128,2.2891-5.67,3.6275-10.248,3.6275a50.7148,50.7148,0,0,1-5.2476-.2115Zm1.6553,22.0107a29.8576,29.8576,0,0,0,3.8388.1763c7.607,0,11.375-4.2617,11.375-11.1289.0352-6.022-3.31-10.1426-10.9174-10.1426a25.2377,25.2377,0,0,0-4.2964.352Z" style="fill:#808285"/><path d="M112.15,13.7493h1.6553V36.0769h10.6006v1.4087H112.15Z" style="fill:#808285"/><path d="M139.0894,25.6877h-9.5088V36.0769h10.6358v1.4087h-12.291V13.7493h11.7275V15.158H129.5806v9.1211h9.5088Z" style="fill:#808285"/></g></g></svg>
</body>
</html>
\ No newline at end of file
{
"LINR1": {
"path": "/home/pjluc/dev/fidle/LinearReg",
"start": "Tuesday 15 December 2020, 14:04:04",
"end": "Tuesday 15 December 2020, 14:04:04",
"duration": "00:00:00 295ms"
},
"GRAD1": {
"path": "/home/pjluc/dev/fidle/LinearReg",
"start": "Tuesday 15 December 2020, 15:05:11",
"end": "Tuesday 15 December 2020, 15:05:14",
"duration": "00:00:03 120ms"
},
"POLR1": {
"path": "/home/pjluc/dev/fidle/LinearReg",
"start": "Tuesday 15 December 2020, 15:05:27",
"end": "Tuesday 15 December 2020, 15:05:28",
"duration": "00:00:01 686ms"
},
"LOGR1": {
"path": "/home/pjluc/dev/fidle/LinearReg",
"start": "Tuesday 15 December 2020, 15:05:42",
"end": "Tuesday 15 December 2020, 15:06:44",
"duration": "00:01:02 112ms"
},
"PER57": {
"path": "/home/pjluc/dev/fidle/IRIS",
"start": "Tuesday 15 December 2020, 21:49:41",
"end": "Tuesday 15 December 2020, 21:49:41",
"duration": "00:00:00 203ms"
},
"BHP1": {
"path": "/home/pjluc/dev/fidle/BHPD",
"start": "Tuesday 15 December 2020, 21:51:22",
"end": "Tuesday 15 December 2020, 21:51:32",
"duration": "00:00:10 080ms"
},
"BHP2": {
"path": "/home/pjluc/dev/fidle/BHPD",
"start": "Tuesday 15 December 2020, 22:05:15",
"end": "Tuesday 15 December 2020, 22:05:26",
"duration": "00:00:11 601ms"
}
}
\ No newline at end of file
# See : https://matplotlib.org/users/customizing.html
axes.titlesize : 24
axes.labelsize : 20
axes.edgecolor : dimgrey
axes.labelcolor : dimgrey
axes.linewidth : 2
axes.grid : False
axes.prop_cycle : cycler('color', ['steelblue', 'tomato', '2ca02c', 'd62728', '9467bd', '8c564b', 'e377c2', '7f7f7f', 'bcbd22', '17becf'])
lines.linewidth : 3
lines.markersize : 10
xtick.color : black
xtick.labelsize : 18
ytick.color : black
ytick.labelsize : 18
axes.spines.left : True
axes.spines.bottom : True
axes.spines.top : False
axes.spines.right : False
savefig.dpi : 300 # figure dots per inch or 'figure'
savefig.facecolor : white # figure facecolor when saving
savefig.edgecolor : white # figure edgecolor when saving
savefig.format : svg
savefig.bbox : tight
savefig.pad_inches : 0.1
savefig.transparent : True
# ==================================================================
# ____ _ _ _ __ __ _
# | _ \ _ __ __ _ ___| |_(_) ___ __ _| | \ \ / /__ _ __| | __
# | |_) | '__/ _` |/ __| __| |/ __/ _` | | \ \ /\ / / _ \| '__| |/ /
# | __/| | | (_| | (__| |_| | (_| (_| | | \ V V / (_) | | | <
# |_| |_| \__,_|\___|\__|_|\___\__,_|_| \_/\_/ \___/|_| |_|\_\
# module pwk
# ==================================================================
# A simple module to host some common functions for practical work
# Jean-Luc Parouty 2020
import os
import glob
import shutil
from datetime import datetime
import itertools
import datetime, time
import json
import math
import numpy as np
from collections.abc import Iterable
import tensorflow as tf
from tensorflow import keras
from sklearn.metrics import confusion_matrix
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from IPython.display import display,Image,Markdown,HTML
import fidle.config as config
# ---- Module state (populated by init(); leading-underscore names are private)
datasets_dir = None      # datasets folder path, resolved by where_are_my_datasets()
notebook_id = None      # current notebook id, shown in reports
_save_figs = False      # when True, save_fig() writes figures to _figs_dir
_figs_dir = './figs'    # output folder for saved figures
_figs_name = 'fig_'     # filename prefix for auto-named figures
_figs_id = 0            # auto-increment counter for figure filenames
_start_time = None      # notebook start timestamp, set by init()
_end_time = None        # notebook end timestamp
# -------------------------------------------------------------
# init_all
# -------------------------------------------------------------
#
def init(name=None, mplstyle=None, cssfile=None):
    """
    Initialize the practical-work environment for a notebook.

    Loads the matplotlib style and CSS, creates the ./run folder, locates the
    datasets folder, syncs the Keras cache, records the start time and prints
    a short environment summary.

    args:
        name     : notebook id (config.DEFAULT_NOTEBOOK_NAME if None)
        mplstyle : matplotlib style file (config.FIDLE_MPLSTYLE if None)
        cssfile  : css file for notebook rendering (config.FIDLE_CSSFILE if None)
    return:
        datasets_dir : resolved datasets folder path
    """
    global notebook_id
    global datasets_dir
    global _start_time
    # ---- Parameters
    #
    notebook_id = config.DEFAULT_NOTEBOOK_NAME if name is None else name
    if mplstyle is None:
        mplstyle = config.FIDLE_MPLSTYLE
    if cssfile is None:
        cssfile = config.FIDLE_CSSFILE
    # ---- Load matplotlib style and css
    #
    matplotlib.style.use(mplstyle)
    load_cssfile(cssfile)
    # ---- Create subdirs
    #
    mkdir('./run')
    # ---- Try to find where we are
    #
    datasets_dir = where_are_my_datasets()
    # ---- Update Keras cache
    #
    updated = update_keras_cache()
    # ---- Today and now
    #
    _start_time = datetime.datetime.now()
    # ---- Hello world
    print('\nFIDLE 2020 - Practical Work Module')
    print('Version :', config.VERSION)
    print('Notebook id :', notebook_id)
    print('Run time :', _start_time.strftime("%A %-d %B %Y, %H:%M:%S"))
    print('TensorFlow version :', tf.__version__)
    print('Keras version :', tf.keras.__version__)
    print('Datasets dir :', datasets_dir)
    print('Update keras cache :',updated)
    # NOTE(review): update_finished_file is defined elsewhere in this module —
    # presumably records start/end markers for the CI report; confirm there.
    update_finished_file(start=True)
    return datasets_dir
# ------------------------------------------------------------------
# Update keras cache
# ------------------------------------------------------------------
# Try to sync ~/.keras/cache with datasets/keras_cache
# because sometime, we cannot access to internet... (IDRIS..)
#
def update_keras_cache():
    """
    Sync the local Keras cache (~/.keras/datasets) from <datasets_dir>/keras_cache.

    Copies every file of the datasets keras_cache folder that is not already
    present in the local Keras cache, so notebooks can run without internet
    access (e.g. on IDRIS compute nodes).

    return:
        True if at least one file was copied, False otherwise
    """
    updated = False
    if os.path.isdir(f'{datasets_dir}/keras_cache'):
        from_dir = f'{datasets_dir}/keras_cache/*.*'
        to_dir = os.path.expanduser('~/.keras/datasets')
        mkdir(to_dir)
        for pathname in glob.glob(from_dir):
            filename = os.path.basename(pathname)
            # Bug fix: the destination must keep the source filename —
            # previously a literal placeholder was used, so every file was
            # copied under one bogus name and Keras cache lookups never matched.
            destname = f'{to_dir}/{filename}'
            if not os.path.isfile(destname):
                shutil.copy(pathname, destname)
                updated = True
    return updated
# ------------------------------------------------------------------
# Where are my datasets ?
# ------------------------------------------------------------------
#
def where_are_my_datasets():
    """
    Resolve the datasets folder from the FIDLE_DATASETS_DIR environment variable.

    return:
        the datasets folder path

    If the variable is unset, prints installation instructions (in French)
    and aborts with an assertion error.
    """
    located = os.getenv('FIDLE_DATASETS_DIR', False)
    if located is not False:
        return located
    # ---- Variable unset : explain how to install the datasets, then abort
    display_md('## ATTENTION !!\n----')
    print('Le dossier datasets sont introuvable\n')
    print('Pour que les notebooks puissent les localiser, vous devez :\n')
    print(' 1/ Récupérer le dossier datasets')
    print(' Une archive (datasets.tar) est disponible via le repository Fidle.\n')
    print(" 2/ Préciser la localisation de ce dossier datasets via la variable")
    print(" d'environnement : FIDLE_DATASETS_DIR.\n")
    print('Exemple :')
    print(" Dans votre fichier .bashrc :")
    print(' export FIDLE_DATASETS_DIR=~/datasets')
    display_md('----')
    assert False, 'datasets folder not found, please set FIDLE_DATASETS_DIR env var.'
# -------------------------------------------------------------
# Folder cooking
# -------------------------------------------------------------
#
def tag_now():
    """Return the current local time as a filesystem-friendly tag (YYYY-MM-DD_HHhMMmSSs)."""
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d_%Hh%Mm%Ss")
def mkdir(path):
    """Create *path* (including parents) with mode 750; silently succeed if it already exists."""
    # exist_ok makes this idempotent : no error on an already-existing folder
    os.makedirs(path, exist_ok=True, mode=0o750)
def get_directory_size(path):
    """
    Return the total size of the plain files directly inside *path*.

    Only the first level is considered : subdirectories and their contents
    are ignored (same as the historical behaviour).

    args:
        path : directory path
    return:
        size in MiB (float)
    """
    # os.scandir stats each entry once, instead of hand-building every
    # child path and calling isfile()+getsize() (two stats per file).
    with os.scandir(path) as entries:
        size = sum(entry.stat().st_size for entry in entries if entry.is_file())
    return size / (1024 * 1024)
# -------------------------------------------------------------
# shuffle_dataset
# -------------------------------------------------------------
#
def shuffle_np_dataset(x, y):
    """
    Shuffle x and y together with a single random permutation.

    args:
        x,y : dataset arrays of identical length
    return:
        shuffled (x, y), with the x/y pairing preserved
    """
    assert (len(x) == len(y)), "x and y must have same size"
    order = np.random.permutation(len(x))
    return x[order], y[order]
def update_progress(what,i,imax, redraw=False):
    """
    Display a text progress bar, as :
    My progress bar : ############# 34%

    args:
        what   : Progress bar name
        i      : Current progress
        imax   : Max value for i
        redraw : Force a redraw, even between refresh steps (False)
    return:
        nothing
    """
    # Robustness fix : an empty task (imax <= 0) has nothing to display and
    # previously raised ZeroDivisionError in the modulo below.
    if imax <= 0:
        return
    bar_length = min(40, imax)
    # Only refresh every imax/bar_length steps, unless finished or forced
    if (i % max(1, imax // bar_length)) != 0 and i < imax and not redraw:
        return
    progress = float(i/imax)
    block = int(round(bar_length * progress))
    # Stay on the same line until 100%, then move to a new line
    endofline = '\r' if progress<1 else '\n'
    text = "{:16s} [{}] {:>5.1f}% of {}".format( what, "#"*block+"-"*(bar_length-block), progress*100, imax)
    print(text, end=endofline)
def rmax(l):
    """
    Recursive max() for a given iterable of iterables
    Should be np.array of np.array or list of list, etc.
    args:
        l : Iterable of iterables
    return:
        max value (-inf for an empty iterable)
    """
    # Recurse into nested iterables, take scalars as-is
    candidates = (rmax(item) if isinstance(item, Iterable) else item for item in l)
    return max(candidates, default=float('-inf'))
def rmin(l):
    """
    Recursive min() for a given iterable of iterables
    Should be np.array of np.array or list of list, etc.
    args:
        l : Iterable of iterables
    return:
        min value (+inf for an empty iterable)
    """
    # Recurse into nested iterables, take scalars as-is
    candidates = (rmin(item) if isinstance(item, Iterable) else item for item in l)
    return min(candidates, default=float('inf'))
# -------------------------------------------------------------
# show_images
# -------------------------------------------------------------
#
def plot_images(x,y=None, indices='all', columns=12, x_size=1, y_size=1,
                colorbar=False, y_pred=None, cm='binary',y_padding=0.35, spines_alpha=1,
                fontsize=20, save_as='auto'):
    """
    Show a grid of images, with optional labels and predictions.
    Mispredicted images get their label shown in red as "pred (real)".
    args:
        x             : images - shapes must be (-1,lx,ly), (-1,lx,ly,1) or (-1,lx,ly,3)
        y             : real classes or labels, or None (None)
        indices       : indices of images to show, or 'all' ('all')
        columns       : number of grid columns (12)
        x_size,y_size : size of one cell (1), (1)
        colorbar      : add a colorbar (False)
        y_pred        : predicted classes, or None (None)
        cm            : Matplotlib color map (binary)
        y_padding     : vertical padding per row (0.35)
        spines_alpha  : alpha of the cell borders (1)
        fontsize      : label font size in px (20)
        save_as       : filename used if figure saving is enabled ('auto')
    returns:
        nothing
    """
    if indices=='all': indices=range(len(x))
    with_labels = (y is not None)
    with_pred   = (y_pred is not None)
    rows = math.ceil(len(indices)/columns)
    fig = plt.figure(figsize=(columns*x_size, rows*(y_size+y_padding)))
    cell = 1
    for i in indices:
        axs = fig.add_subplot(rows, columns, cell)
        cell += 1
        # ---- Squeeze a trailing channel dimension of 1: (lx,ly,1) -> (lx,ly)
        if len(x[i].shape)==2:
            xx = x[i]
        if len(x[i].shape)==3:
            (lx,ly,lz) = x[i].shape
            xx = x[i].reshape(lx,ly) if lz==1 else x[i]
        img = axs.imshow(xx, cmap=cm, interpolation='lanczos')
        # ---- Cell borders, with the requested alpha
        for side in ('right', 'left', 'top', 'bottom'):
            axs.spines[side].set_visible(True)
            axs.spines[side].set_alpha(spines_alpha)
        axs.set_yticks([])
        axs.set_xticks([])
        # ---- Labels under each image
        if with_labels and not with_pred:
            axs.set_xlabel(y[i],fontsize=fontsize)
        if with_labels and with_pred:
            if y[i]!=y_pred[i]:
                # Wrong prediction: show "pred (real)" in red
                axs.set_xlabel(f'{y_pred[i]} ({y[i]})',fontsize=fontsize)
                axs.xaxis.label.set_color('red')
            else:
                axs.set_xlabel(y[i],fontsize=fontsize)
        if colorbar:
            fig.colorbar(img,orientation="vertical", shrink=0.65)
    save_fig(save_as)
    plt.show()
def plot_image(x,cm='binary', figsize=(4,4),save_as='auto'):
    """
    Draw a single image.
    Image shape can be (lx,ly), (lx,ly,1) or (lx,ly,n)
    args:
        x       : image as np array
        cm      : color map ('binary')
        figsize : fig size ( (4,4) )
        save_as : filename used if figure saving is enabled ('auto')
    """
    # ---- Squeeze a trailing channel dimension of 1: (lx,ly,1) -> (lx,ly)
    if len(x.shape)==2:
        xx = x
    if len(x.shape)==3:
        (lx,ly,lz) = x.shape
        xx = x.reshape(lx,ly) if lz==1 else x
    # ---- Draw it
    plt.figure(figsize=figsize)
    plt.imshow(xx, cmap=cm, interpolation='lanczos')
    save_fig(save_as)
    plt.show()
# -------------------------------------------------------------
# show_history
# -------------------------------------------------------------
#
def plot_history(history, figsize=(8,6),
                 plot={"Accuracy":['accuracy','val_accuracy'], 'Loss':['loss', 'val_loss']},
                 save_as='auto'):
    """
    Plot a Keras-style training history, one figure per entry of `plot`.
    args:
        history : history object (with a .history dict of metric lists)
        figsize : figure size ( (8,6) )
        plot    : data to plot : {<title>:[<metric>,...], ...}
        save_as : base filename used if figure saving is enabled ('auto')
    """
    fig_id = 0
    for title, curves in plot.items():
        plt.figure(figsize=figsize)
        plt.title(title)
        plt.ylabel(title)
        plt.xlabel('Epoch')
        for metric in curves:
            plt.plot(history.history[metric])
        plt.legend(curves, loc='upper left')
        # ---- One numbered file per figure, unless fully automatic naming
        if save_as=='auto':
            save_fig('auto')
        else:
            save_fig(f'{save_as}_{fig_id}')
            fig_id += 1
        plt.show()
# -------------------------------------------------------------
# plot_confusion_matrix
# -------------------------------------------------------------
# Bug in Matplotlib 3.1.1
#
def plot_confusion_matrix(cm,
                          title='Confusion matrix',
                          figsize=(12,8),
                          cmap="gist_heat_r",
                          vmin=0,
                          vmax=1,
                          xticks=5,yticks=5,
                          annot=True,
                          save_as='auto'):
    """
    Given a sklearn confusion matrix (cm), draw it as a seaborn heatmap.
    Note: bug in matplotlib 3.1.1
    Args:
        cm        : confusion matrix from sklearn.metrics.confusion_matrix
        title     : text to display at the top of the matrix
        figsize   : figure size ( (12,8) )
        cmap      : color map (gist_heat_r)
        vmin,vmax : color scale min/max (0 and 1)
        xticks,yticks : tick labels passed to seaborn (5), (5)
        annot     : annotate cells with values, or colors only (True)
        save_as   : filename used if figure saving is enabled ('auto')
    """
    total    = float(np.sum(cm))
    accuracy = np.trace(cm) / total
    misclass = 1 - accuracy
    plt.figure(figsize=figsize)
    sn.heatmap(cm, linewidths=1, linecolor="#ffffff",square=True,
               cmap=cmap, xticklabels=xticks, yticklabels=yticks,
               vmin=vmin,vmax=vmax,annot=annot)
    plt.ylabel('True label')
    plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
    save_fig(save_as)
    plt.show()
def display_confusion_matrix(y_true,y_pred,labels=None,color='green',
                             font_size='12pt', title="#### Confusion matrix is :"):
    """
    Show a (row-normalized) confusion matrix for predictions, as a styled DataFrame.
    see : sklearn.metrics.confusion_matrix
    Args:
        y_true    : Real classes
        y_pred    : Predicted classes
        labels    : List of classes to show in the cm (required)
        color     : Color for the palette (green)
        font_size : Values font size ('12pt')
        title     : Markdown title shown above the matrix, or None
    """
    # 'is not None' instead of '!= None' : identity test is the correct idiom
    assert labels is not None, "Label must be set"
    if title is not None:
        display(Markdown(title))
    cm = confusion_matrix( y_true,y_pred, normalize="true", labels=labels)
    df=pd.DataFrame(cm)
    cmap = sn.light_palette(color, as_cmap=True)
    # Styler methods return a new Styler object, so they must be chained on the
    # value that is displayed (a standalone set_properties() call has no effect).
    display(df.style.format('{:.2f}') \
            .background_gradient(cmap=cmap)
            .set_properties(**{'font-size': font_size}))
def plot_donut(values, labels, colors=["lightsteelblue","coral"], figsize=(6,6), title=None, save_as='auto'):
    """
    Draw a donut chart.
    args:
        values  : list of values
        labels  : list of labels
        colors  : list of colors (["lightsteelblue","coral"])
        figsize : size of figure ( (6,6) )
        title   : optional Markdown title shown above the chart (None)
        save_as : filename used if figure saving is enabled ('auto')
    return:
        nothing
    """
    if title is not None:
        display(Markdown(title))
    plt.figure(figsize=figsize)
    # ---- A donut is a pie chart...
    plt.pie(values, labels=labels,
            colors = colors, autopct='%1.1f%%', startangle=70, pctdistance=0.85,
            textprops={'fontsize': 18},
            wedgeprops={"edgecolor":"w",'linewidth': 5, 'linestyle': 'solid', 'antialiased': True})
    # ---- ...with a white disc punched in the middle
    hole = plt.Circle((0,0),0.70,fc='white')
    plt.gca().add_artist(hole)
    # ---- Equal aspect ratio keeps the pie circular
    plt.axis('equal')
    plt.tight_layout()
    save_fig(save_as)
    plt.show()
def plot_multivariate_serie(sequence, labels=None, predictions=None, only_features=None,
                            columns=3, width=5,height=4,wspace=0.3,hspace=0.2,
                            save_as='auto', time_dt=1):
    """
    Plot each feature of a multivariate time series, optionally with predictions
    overlaid in red over the last len(predictions) steps.
    args:
        sequence      : 2D array-like (time, features)
        labels        : list of feature labels, or None for indices (None)
        predictions   : array (dt, features) of predicted values for the last dt steps, or None (None)
        only_features : indices of features to plot, or None for all (None)
        columns       : number of grid columns (3)
        width,height  : size of one subplot (5), (4)
        wspace,hspace : subplot spacing (0.3), (0.2)
        save_as       : filename used if figure saving is enabled ('auto')
        time_dt       : kept for interface compatibility (currently unused) (1)
    """
    sequence_len = len(sequence)
    features_len = sequence.shape[1]
    if only_features is None : only_features=range(features_len)
    if labels is None : labels=range(features_len)
    t = np.arange(sequence_len)
    if predictions is None:
        dt  = 0
        # Bug fix: with dt=0, a slice [:-dt] would be empty ([:-0] == [:0]);
        # use None as the slice end so the full sequence is drawn.
        lim = None
        sequence_with_pred = None
    else:
        dt  = len(predictions)
        lim = -dt
        sequence_with_pred = sequence.copy()
        sequence_with_pred[-dt:]=predictions
    rows = math.ceil(features_len/columns)
    fig  = plt.figure(figsize=(columns*width, rows*height))
    # Bug fix: honor the wspace/hspace parameters (they were hard-coded before)
    fig.subplots_adjust(wspace=wspace, hspace=hspace)
    n=1
    for i in only_features:
        ax=fig.add_subplot(rows, columns, n)
        # ---- Observed part of the series (solid blue line + dots)
        ax.plot(t[:lim], sequence[:lim,i], '-', linewidth=1, color='steelblue', label=labels[i])
        ax.plot(t[:lim], sequence[:lim,i], 'o', markersize=4, color='steelblue')
        if predictions is not None:
            # ---- Ground truth over the predicted window (dashed, hollow markers)
            ax.plot(t[-dt-1:], sequence[-dt-1:,i],'--o', linewidth=1, fillstyle='none', markersize=6, color='steelblue')
            # ---- Predictions (dashed red line + red dots)
            ax.plot(t[-dt-1:], sequence_with_pred[-dt-1:,i], '--', linewidth=1, fillstyle='full', markersize=6, color='red')
            ax.plot(t[-dt:], predictions[:,i], 'o', linewidth=1, fillstyle='full', markersize=6, color='red')
        ax.legend(loc="upper left")
        n+=1
    save_fig(save_as)
    plt.show()
def set_save_fig(save=True, figs_dir='./figs', figs_name='fig_', figs_id=0):
    """
    Configure figure saving, used by save_fig().
    Default figs name is <figs_name><figs_id>.{png|svg}
    args:
        save      : True to enable saving of figures (True)
        figs_dir  : Directory where figures are written (./figs)
        figs_name : Basename used for automatic figure names (fig_)
        figs_id   : First id used for automatic figure names (0)
    """
    global _save_figs, _figs_dir, _figs_name, _figs_id
    _save_figs, _figs_dir  = save, figs_dir
    _figs_name, _figs_id   = figs_name, figs_id
    print(f'Save figs : {_save_figs}')
    print(f'Path figs : {_figs_dir}')
def save_fig(filename='auto', png=True, svg=False):
    """
    Save the current matplotlib figure, if figure saving is enabled (see set_save_fig).
    args:
        filename : Image basename, or 'auto' for the automatic numbered name ('auto')
        png      : Boolean. Save as png if True (True)
        svg      : Boolean. Save as svg if True (False)
    """
    global _save_figs, _figs_dir, _figs_name, _figs_id
    if not _save_figs : return
    mkdir(_figs_dir)
    if filename=='auto':
        path=f'{_figs_dir}/{_figs_name}{_figs_id:02d}'
    else:
        # Bug fix: use the caller-provided basename (it was previously ignored
        # and a fixed path was written instead).
        path=f'{_figs_dir}/{filename}'
    if png : plt.savefig( f'{path}.png')
    # Bug fix: svg output was saved with a .png extension
    if svg : plt.savefig( f'{path}.svg')
    # Only the automatic counter advances; explicit names don't consume an id
    if filename=='auto': _figs_id+=1
def subtitle(t):
    """Display *t* as a bold Markdown subtitle, preceded by a line break."""
    display(Markdown(f'<br>**{t}**'))
def display_md(md_text):
    """Render a Markdown string in the notebook output."""
    display(Markdown(md_text))
def display_img(img):
    """Display an image in the notebook output (via IPython Image)."""
    display(Image(img))
def hdelay(sec):
    """Return a human readable duration (H:MM:SS) for *sec* seconds (fraction dropped)."""
    delta = datetime.timedelta(seconds=int(sec))
    return str(delta)
# Return human delay like 01:14:28 543ms
def hdelay_ms(td):
    """Return a human readable delay like '01:14:28 543ms' from a datetime.timedelta."""
    total = td.total_seconds()
    hh, rem = divmod(total, 3600)
    mm, ss  = divmod(rem, 60)
    ms = (total - int(total)) * 1000
    return f'{hh:02.0f}:{mm:02.0f}:{ss:02.0f} {ms:03.0f}ms'
def hsize(num, suffix='o'):
    """Return a human readable size like '1.5 Go' (powers of 1024, French 'o' suffix)."""
    for prefix in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
        if abs(num) < 1024.0:
            return f'{num:3.1f} {prefix}{suffix}'
        num = num / 1024.0
    return f'{num:.1f} Y{suffix}'
def load_cssfile(cssfile):
    """
    Load a CSS file and inject its styles into the notebook output.
    args:
        cssfile : path to the css file, or None to do nothing
    """
    if cssfile is None: return
    # Use a context manager so the file handle is always closed
    # (it was previously opened and never closed).
    with open(cssfile, "r") as f:
        styles = f.read()
    display(HTML(styles))
def np_print(*args, format={'float': '{:6.3f}'.format}):
    """
    Print numpy objects using a given formatter (default: floats as '{:6.3f}').
    args:
        args   : objects to print
        format : numpy formatter dict ({'float': '{:6.3f}'.format})
    """
    with np.printoptions(formatter=format):
        for item in args:
            print(item)
def check_finished_file():
    """
    Check that the shared 'finished' file is writable.
    return:
        True if writable, False otherwise (with an error message printed)
    """
    if os.access(config.FINISHED_FILE, os.W_OK):
        return True
    print("\n** Error : Cannot access finished file in write mode for reset...")
    print(f'** Finished file should be at : {config.FINISHED_FILE}\n')
    return False
def reset_finished_file():
    """Reset the shared 'finished' file to an empty registry (no-op if not writable)."""
    if not check_finished_file():
        return
    # ---- Overwrite with an empty dict
    with open(config.FINISHED_FILE,'wt') as fp:
        json.dump({}, fp, indent=4)
    print(f'Finished file has been reset.\n')
def update_finished_file(start=False, end=False):
    """
    Record the start and/or end of the current notebook in the shared 'finished' file.
    args:
        start : register a run start for notebook_id (False)
        end   : register a run end and its duration for notebook_id (False)
    """
    # ---- No writable finished file ?
    if check_finished_file() is False : return
    # ---- Load current registry
    with open(config.FINISHED_FILE) as fp:
        data = json.load(fp)
    # ---- Register a start : fresh entry, end still unknown
    if start is True:
        data[notebook_id] = {
            'path'     : os.getcwd(),
            'start'    : _start_time.strftime("%A %-d %B %Y, %H:%M:%S"),
            'end'      : '',
            'duration' : 'Unfinished...',
        }
    # ---- Register an end : complete the existing entry
    if end is True:
        data[notebook_id]['end']      = _end_time.strftime("%A %-d %B %Y, %H:%M:%S")
        data[notebook_id]['duration'] = hdelay_ms(_end_time - _start_time)
    # ---- Save updated registry
    with open(config.FINISHED_FILE,'wt') as fp:
        json.dump(data,fp,indent=4)
def end():
    """
    Mark the end of the notebook run.

    Records the end time in the module global `_end_time`, updates the
    shared 'finished' file, and prints the end time and total duration.
    """
    global _end_time
    _end_time = datetime.datetime.now()
    update_finished_file(end=True)
    # NOTE(review): '%-d' (no zero padding) is a glibc strftime extension —
    # presumably fine on the target platforms, but not portable to Windows.
    print('End time is :', time.strftime("%A %-d %B %Y, %H:%M:%S"))
    print('Duration is :', hdelay_ms(_end_time - _start_time))
    print('This notebook ends here')