Commit 05d9d44b ("Upload New File")
Authored 4 years ago by EXT Laurent Risser
Parent: 3d80a8da
No related branches or tags; part of 1 merge request: !5 "Update style in README"

Showing 1 changed file: BHPD_PyTorch/fidle_pwk_additional.py (new file, mode 100644, 88 additions, 0 deletions)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
class convergence_history_CrossEntropyLoss:
    def __init__(self):
        """
        Class to save the training convergence properties
        """
        self.loss = nn.CrossEntropyLoss()
        self.history = {}  # Save convergence measures at the end of each epoch
        self.history['loss'] = []          # value of the cost function on training data
        self.history['accuracy'] = []      # percentage of correctly classified instances on training data
        self.history['val_loss'] = []      # value of the cost function on validation data
        self.history['val_accuracy'] = []  # percentage of correctly classified instances on validation data

    def update(self, current_model, xtrain, ytrain, xtest, ytest):
        # convergence information on the training data, computed on a random
        # subsample no larger than the test set
        nb_training_obs = xtrain.shape[0]
        if nb_training_obs > xtest.shape[0]:
            nb_training_obs = xtest.shape[0]

        epoch_shuffler = np.arange(xtrain.shape[0])
        np.random.shuffle(epoch_shuffler)
        mini_batch_observations = epoch_shuffler[:nb_training_obs]

        var_X_batch = Variable(xtrain[mini_batch_observations, :]).float()
        var_y_batch = Variable(ytrain[mini_batch_observations])
        y_pred_batch = current_model(var_X_batch)
        curr_loss = self.loss(y_pred_batch, var_y_batch)

        self.history['loss'].append(curr_loss.item())
        self.history['accuracy'].append(float((torch.argmax(y_pred_batch, dim=1) == var_y_batch).float().mean()))

        # convergence information on the test data
        var_X_batch = Variable(xtest[:, :]).float()
        var_y_batch = Variable(ytest[:])
        y_pred_batch = current_model(var_X_batch)
        curr_loss = self.loss(y_pred_batch, var_y_batch)

        self.history['val_loss'].append(curr_loss.item())
        self.history['val_accuracy'].append(float((torch.argmax(y_pred_batch, dim=1) == var_y_batch).float().mean()))
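
For context, a minimal sketch of how the classification tracker above might be driven from a training loop. Everything in it is illustrative: the synthetic tensors, the Sequential model, the optimizer settings and the epoch count are placeholder assumptions, not part of the committed file.

# Usage sketch (commentary, not part of the committed file).
import torch
import torch.nn as nn

torch.manual_seed(0)
# placeholder data: 13 features, 3 classes
x_train, y_train = torch.randn(400, 13), torch.randint(0, 3, (400,))
x_test, y_test = torch.randn(100, 13), torch.randint(0, 3, (100,))

model = nn.Sequential(nn.Linear(13, 32), nn.ReLU(), nn.Linear(32, 3))
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()
history = convergence_history_CrossEntropyLoss()

for epoch in range(20):
    optimizer.zero_grad()
    loss = criterion(model(x_train), y_train)
    loss.backward()
    optimizer.step()
    # record train/validation loss and accuracy at the end of each epoch
    history.update(model, x_train, y_train, x_test, y_test)

print(history.history['val_accuracy'][-1])
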
class convergence_history_MSELoss:
    def __init__(self):
        """
        Class to save the training convergence properties
        """
        self.loss = nn.MSELoss()
        self.MAE_loss = nn.L1Loss()
        self.history = {}  # Save convergence measures at the end of each epoch
        self.history['loss'] = []      # value of the cost function on training data
        self.history['mae'] = []       # mean absolute error on training data
        self.history['val_loss'] = []  # value of the cost function on validation data
        self.history['val_mae'] = []   # mean absolute error on validation data

    def update(self, current_model, xtrain, ytrain, xtest, ytest):
        # convergence information on the training data, computed on a random
        # subsample no larger than the test set
        nb_training_obs = xtrain.shape[0]
        if nb_training_obs > xtest.shape[0]:
            nb_training_obs = xtest.shape[0]

        epoch_shuffler = np.arange(xtrain.shape[0])
        np.random.shuffle(epoch_shuffler)
        mini_batch_observations = epoch_shuffler[:nb_training_obs]

        var_X_batch = Variable(xtrain[mini_batch_observations, :]).float()
        var_y_batch = Variable(ytrain[mini_batch_observations]).float()
        y_pred_batch = current_model(var_X_batch)
        curr_loss = self.loss(y_pred_batch.view(-1), var_y_batch.view(-1))

        self.history['loss'].append(curr_loss.item())
        self.history['mae'].append(self.MAE_loss(y_pred_batch.view(-1), var_y_batch.view(-1)).item())

        # convergence information on the test data
        var_X_batch = Variable(xtest[:, :]).float()
        var_y_batch = Variable(ytest[:]).float()
        y_pred_batch = current_model(var_X_batch)
        curr_loss = self.loss(y_pred_batch.view(-1), var_y_batch.view(-1))

        self.history['val_loss'].append(curr_loss.item())
        self.history['val_mae'].append(self.MAE_loss(y_pred_batch.view(-1), var_y_batch.view(-1)).item())
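
The regression tracker is driven the same way, with continuous targets and a scalar output head; it additionally records mean absolute error under 'mae' and 'val_mae'. Again a minimal sketch with placeholder data, model and hyperparameters:

# Usage sketch (commentary, not part of the committed file).
import torch
import torch.nn as nn

torch.manual_seed(0)
# placeholder regression data: 13 features, continuous targets
x_train, y_train = torch.randn(400, 13), torch.randn(400)
x_test, y_test = torch.randn(100, 13), torch.randn(100)

model = nn.Sequential(nn.Linear(13, 32), nn.ReLU(), nn.Linear(32, 1))
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.MSELoss()
history = convergence_history_MSELoss()

for epoch in range(20):
    optimizer.zero_grad()
    loss = criterion(model(x_train).view(-1), y_train)
    loss.backward()
    optimizer.step()
    history.update(model, x_train, y_train, x_test, y_test)

print(history.history['val_mae'][-1])

Two caveats worth noting: update() runs its forward passes with autograd enabled, so in practice one might wrap the call in torch.no_grad() since it only computes metrics; and torch.autograd.Variable has been a deprecated no-op wrapper since PyTorch 0.4, so the Variable(...) calls simply return their tensor argument.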