Commit 20382f6b authored by Millian Poquet's avatar Millian Poquet
Browse files

Modification des structures de données utilisées pour accéder aux jobs, ajout...

Modification des structures de données utilisées pour accéder aux jobs, ajout d'une méthode jobFromJobID. Le vrai ID du job est utilisé partout dans le code pour cohérence. Correction de divers warnings clang. Ajout de copies mémoire après les lectures depuis le fichier JSON en prévision d'un clean de la mémoire associée au fichier JSON. Ajout de vérifications concernant les données venant de la socket (si le job existe, si le job est dans le bon état, si les machines sont valides...)
parent ce5d6daf
......@@ -11,9 +11,10 @@ CHECK_C_COMPILER_FLAG("-std=c99" COMPILER_SUPPORTS_C99)
if(COMPILER_SUPPORTS_C11)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11")
elseif(COMPILER_SUPPORTS_C99)
message(STATUS "The compiler ${CMAKE_C_COMPILER} has no C11 support. Using C99 instead.")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99")
else()
message(STATUS "The compiler ${CMAKE_C_COMPILER} has no C11 nor C99 support. Please use a different C compiler.")
message(STATUS "The compiler ${CMAKE_C_COMPILER} has no C11 nor C99 support. Please update your C compiler.")
endif()
add_executable(batsim batsim.c job.c utils.c export.c)
......
......@@ -18,96 +18,94 @@ msg_host_t *nodes;
/**
* \brief Execute jobs alone
* \brief Execute jobs alone
*
* Execute jobs sequentially without server and scheduler
*
*/
/**
 * \brief Execute all jobs alone, sequentially, without server nor scheduler.
 *
 * Each job runs on resources [0; job->nb_res[ — one job at a time, in the
 * order in which the jobs appear in jobs_dynar.
 * \return 0 (MSG process return convention)
 */
static int job_launcher(int argc, char *argv[])
{
    unsigned int job_index;
    s_job_t * job;

    xbt_dynar_foreach(jobs_dynar, job_index, job)
    {
        // MSG_get_clock() returns a double: an int here would truncate the
        // simulated time and skew the per-job timing reported below.
        double t = MSG_get_clock();

        // Assign resources 0..nb_res-1: jobs always start at machine 0
        int * res_idxs = (int *) malloc(job->nb_res * sizeof(int));
        for (int i = 0; i < job->nb_res; i++)
            res_idxs[i] = i;

        job_exec(job->id, job->nb_res, res_idxs, nodes, NULL);
        XBT_INFO("Job id %d, job simulation time: %f", job->id, MSG_get_clock() - t);
        free(res_idxs);
    }

    return 0;
}
/**
 * \brief Load the platform, pick a host to run the launcher, then run the simulation.
 * \param platform_file The SimGrid platform file describing the machines
 * \return The result of MSG_main() (MSG_OK on success)
 */
msg_error_t deploy_all(const char *platform_file)
{
    msg_error_t res = MSG_OK;
    xbt_dynar_t all_hosts;
    msg_host_t first_host;

    // ptask_L07 is the host model that supports parallel tasks
    MSG_config("workstation/model", "ptask_L07");
    MSG_create_environment(platform_file);

    all_hosts = MSG_hosts_as_dynar();

    // The first host runs the launcher process; the remaining hosts
    // become the computing nodes on which jobs are executed.
    xbt_dynar_get_cpy(all_hosts, 0, &first_host);
    xbt_dynar_remove_at(all_hosts, 0, NULL);

    nb_nodes = xbt_dynar_length(all_hosts);
    nodes = xbt_dynar_to_array(all_hosts);
    XBT_INFO("Nb nodes: %d", nb_nodes);

    MSG_process_create("job_launcher", job_launcher, NULL, first_host);

    res = MSG_main();
    XBT_INFO("Simulation time %g", MSG_get_clock());
    return res;
}
/**
 * \brief Batexec entry point: executes a JSON workload's jobs in FIFO order.
 * \param argv argv[1] is the platform file, argv[2] the JSON workload file
 * \return 0 if the simulation succeeded, 1 otherwise
 */
int main(int argc, char *argv[])
{
    msg_error_t res = MSG_OK;
    json_t *json_workload_profile;

    // Comment this line to remove debug messages
    xbt_log_control_set("batexec.threshold:debug");

    // Both argv[1] (platform) and argv[2] (workload) are required,
    // so argc must be at least 3 — checking "< 2" would let the
    // argv[2] access below read past the argument vector.
    if (argc < 3)
    {
        printf("Batexec: execute a list of jobs in FIFO.\n");
        printf("Resources are assigned from 0, only one job is running at a time\n");
        printf("\n");
        printf("Usage: %s platform_file workload_file\n", argv[0]);
        printf("example: %s ../platforms/small_platform.xml ../workload_profiles/test_workload_profile.json\n", argv[0]);
        exit(1);
    }

    json_workload_profile = load_json_workload_profile(argv[2]);
    retrieve_jobs(json_workload_profile);
    retrieve_profiles(json_workload_profile);

    MSG_init(&argc, argv);

    res = deploy_all(argv[1]);

    return (res == MSG_OK) ? 0 : 1;
}
......@@ -71,6 +71,8 @@ static int send_uds(char *msg)
write(uds_fd, &lg, 4);
write(uds_fd, msg, lg);
free(msg);
return 0;
}
/**
......@@ -83,6 +85,7 @@ static char *recv_uds()
char *msg;
read(uds_fd, &lg, 4);
XBT_INFO("Received message length: %d bytes", lg);
xbt_assert(lg > 0, "Invalid message received (size=%d)", lg);
msg = (char *) malloc(sizeof(char)*(lg+1)); /* +1 for null terminator */
read(uds_fd, msg, lg);
msg[lg] = 0;
......@@ -162,12 +165,12 @@ static int send_sched(int argc, char *argv[])
/**
* \brief
*/
void send_message(const char *dst, e_task_type_t type, int job_idx, void *data)
void send_message(const char *dst, e_task_type_t type, int job_id, void *data)
{
msg_task_t task_sent;
task_data_t req_data = xbt_new0(s_task_data_t,1);
req_data->type = type;
req_data->job_idx = job_idx;
req_data->job_id = job_id;
req_data->data = data;
req_data->src = MSG_host_get_name(MSG_host_self());
task_sent = MSG_task_create(NULL, COMP_SIZE, COMM_SIZE, req_data);
......@@ -191,7 +194,6 @@ static void task_free(struct msg_task ** task)
}
}
/**
* @brief The function in charge of job launching
* @return 0
......@@ -208,8 +210,9 @@ static int launch_job(int argc, char *argv[])
data = MSG_process_get_data(MSG_process_self());
s_kill_data_t * kdata = data->killerData;
int jobID = data->jobID;
s_job_t * job = jobFromJobID(jobID);
XBT_INFO("Launching job %d", jobID, data->killerProcess, MSG_process_self());
XBT_INFO("Launching job %d", jobID);
// Let's run the job
pajeTracer_addJobLaunching(tracer, MSG_get_clock(), jobID, data->reservedNodeCount, data->reservedNodesIDs);
......@@ -217,16 +220,17 @@ static int launch_job(int argc, char *argv[])
job_exec(jobID, data->reservedNodeCount, data->reservedNodesIDs, nodes, &(data->dataToRelease));
// .
// / \
// / | \
// / | \ The remaining code of this function is executed ONLY IF
// / | \ the job finished in time (its execution time was smaller than its walltime)
// / \
// / * \
// . |
// / \ |
// / | \ |
// / | \ | The remaining code of this function is executed ONLY IF
// / | \ | the job finished in time (its execution time was smaller than its walltime)
// / \ |
// / * \ |
// ‾‾‾‾‾‾‾‾‾‾‾‾‾
XBT_INFO("Job %d finished in time.", jobID);
job->state = JOB_STATE_COMPLETED_SUCCESSFULLY;
int dbg = 0;
pajeTracer_addJobEnding(tracer, MSG_get_clock(), jobID, data->reservedNodeCount, data->reservedNodesIDs);
......@@ -261,22 +265,25 @@ static int kill_job(int argc, char *argv[])
data = MSG_process_get_data(MSG_process_self());
s_launch_data_t * ldata = data->launcherData;
int jobID = ldata->jobID;
s_job_t * job = jobFromJobID(jobID);
double walltime = job->walltime;
XBT_INFO("Sleeping for %lf seconds to possibly kill job %d", jobs[jobID].walltime, jobID);
XBT_INFO("Sleeping for %lf seconds to possibly kill job %d", walltime, jobID);
// Let's sleep until the walltime is reached
MSG_process_sleep(jobs[jobID].walltime);
MSG_process_sleep(walltime);
// .
// / \
// / | \
// / | \ The remaining code of this function is executed ONLY IF
// / | \ the killer finished its wait before the launcher completion
// / \
// / * \
// / \ |
// / | \ |
// / | \ | The remaining code of this function is executed ONLY IF
// / | \ | the killer finished its wait before the launcher completion
// / \ |
// / * \ |
// ‾‾‾‾‾‾‾‾‾‾‾‾‾
XBT_INFO("Sleeping done. Job %d did NOT finish in time and must be killed", jobID);
job->state = JOB_STATE_COMPLETED_SUCCESSFULLY;
pajeTracer_addJobEnding(tracer, MSG_get_clock(), jobID, ldata->reservedNodeCount, ldata->reservedNodesIDs);
pajeTracer_addJobKill(tracer, MSG_get_clock(), jobID, ldata->reservedNodeCount, ldata->reservedNodesIDs);
......@@ -288,7 +295,7 @@ static int kill_job(int argc, char *argv[])
free(ldata);
free(data);
XBT_INFO("Killing and freeing done", jobID);
XBT_INFO("Killing and freeing of job %d done", jobID);
// Let's say to the server that the job execution finished
send_message("server", JOB_COMPLETED, jobID, NULL);
......@@ -350,7 +357,7 @@ static int node(int argc, char *argv[])
killData->launcherProcess = launcher;
killData->launcherData = launchData;
XBT_INFO("job %d master: processes and data set", task_data->job_idx);
XBT_INFO("job %d master: processes and data set", task_data->job_id);
// Let's wake the processes then destroy the barrier
MSG_barrier_wait(barrier);
......@@ -362,6 +369,7 @@ static int node(int argc, char *argv[])
task_free(&task_received);
}
return 0;
}
/**
......@@ -371,6 +379,25 @@ static int node(int argc, char *argv[])
*/
static int jobs_submitter(int argc, char *argv[])
{
s_job_t * job;
unsigned int job_index;
// todo: read jobs here and sort them by ascending submission date
double previousSubmissionDate = MSG_get_clock();
xbt_dynar_foreach(jobs_dynar, job_index, job)
{
if (job->submission_time < previousSubmissionDate)
XBT_ERROR("The input workload JSON file is not sorted by ascending date, which is not handled yet");
double timeToSleep = max(0, job->submission_time - previousSubmissionDate);
MSG_process_sleep(timeToSleep);
previousSubmissionDate = MSG_get_clock();
send_message("server", JOB_SUBMITTED, job->id, NULL);
}
/*
double submission_time = 0.0;
int job2submit_idx = 0;
xbt_dynar_t jobs2sub_dynar;
......@@ -398,7 +425,8 @@ static int jobs_submitter(int argc, char *argv[])
send_message("server", JOB_SUBMITTED, job_idx, NULL);
}
xbt_dynar_free(&jobs2sub_dynar);
}
}*/
return 0;
}
......@@ -421,10 +449,10 @@ int server(int argc, char *argv[])
char *tmp;
char *job_ready_str;
char *jobid_ready;
char *job_id_str;
char *res_str;
char *job_id_str;
double t = 0;
int i, j;
unsigned int i, j;
while ((nb_completed_jobs < nb_jobs) || !sched_ready)
{
......@@ -440,9 +468,10 @@ int server(int argc, char *argv[])
case JOB_COMPLETED:
{
nb_completed_jobs++;
job_id_str = jobs[task_data->job_idx].id_str;
XBT_INFO("Job id %s COMPLETED, %d jobs completed", job_id_str, nb_completed_jobs);
size_m = asprintf(&sched_message, "%s|%f:C:%s", sched_message, MSG_get_clock(), job_id_str);
s_job_t * job = jobFromJobID(task_data->job_id);
XBT_INFO("Job %d COMPLETED. %d jobs completed so far", job->id, nb_completed_jobs);
size_m = asprintf(&sched_message, "%s|%f:C:%s", sched_message, MSG_get_clock(), job->id_str);
XBT_INFO("Message to send to scheduler: %s", sched_message);
//TODO add job_id + msg to send
......@@ -451,9 +480,11 @@ int server(int argc, char *argv[])
case JOB_SUBMITTED:
{
nb_submitted_jobs++;
job_id_str = jobs[task_data->job_idx].id_str;
XBT_INFO("Job id %s SUBMITTED, %d jobs submitted", job_id_str, nb_submitted_jobs);
size_m = asprintf(&sched_message, "%s|%f:S:%s", sched_message, MSG_get_clock(), job_id_str);
s_job_t * job = jobFromJobID(task_data->job_id);
job->state = JOB_STATE_SUBMITTED;
XBT_INFO("Job %d SUBMITTED. %d jobs submitted so far", job->id, nb_submitted_jobs);
size_m = asprintf(&sched_message, "%s|%f:S:%s", sched_message, MSG_get_clock(), job->id_str);
XBT_INFO("Message to send to scheduler: %s", sched_message);
break;
......@@ -481,7 +512,11 @@ int server(int argc, char *argv[])
job_id_str = *(char **)xbt_dynar_get_ptr(job_id_res, 0);
char * job_reservs_str = *(char **)xbt_dynar_get_ptr(job_id_res, 1);
int *job_idx = xbt_dict_get(jobs_idx2id, job_id_str);
int jobID = strtol(job_id_str, NULL, 10);
xbt_assert(jobExists(jobID), "Invalid jobID '%s' received from the scheduler: the job does not exist", job_id_str);
s_job_t * job = jobFromJobID(jobID);
xbt_assert(job->state == JOB_STATE_SUBMITTED, "Invalid allocation from the scheduler: the job %d is either not submitted yet or already scheduled", jobID);
job->state = JOB_STATE_RUNNING;
xbt_dynar_t res_dynar = xbt_str_split(job_reservs_str, ",");
......@@ -490,12 +525,12 @@ int server(int argc, char *argv[])
// Let's create a launch data structure, which will be given to the head_node then used to launch the job
s_launch_data_t * launchData = (s_launch_data_t*) malloc(sizeof(s_launch_data_t));
launchData->jobID = *job_idx;
launchData->jobID = jobID;
launchData->reservedNodeCount = xbt_dynar_length(res_dynar);
xbt_assert(launchData->reservedNodeCount == jobs[launchData->jobID].nb_res,
xbt_assert(launchData->reservedNodeCount == jobFromJobID(launchData->jobID)->nb_res,
"Invalid scheduling algorithm decision: allocation of job %d is done on %d nodes (instead of %d)",
launchData->jobID, launchData->reservedNodeCount, jobs[launchData->jobID].nb_res);
launchData->jobID, launchData->reservedNodeCount, jobFromJobID(launchData->jobID)->nb_res);
launchData->reservedNodesIDs = (int*) malloc(launchData->reservedNodeCount * sizeof(int));
launchData->dataToRelease = xbt_dict_new();
......@@ -504,10 +539,13 @@ int server(int argc, char *argv[])
// Let's fill the reserved node IDs from the XBT dynar
xbt_dynar_foreach(res_dynar, j, res_str)
{
launchData->reservedNodesIDs[j] = atoi(res_str);
int machineID = atoi(res_str);
launchData->reservedNodesIDs[j] = machineID;
xbt_assert(machineID >= 0 && machineID < nb_nodes,
"Invalid machineID (%d) received from the scheduler: not in range [0;%d]", machineID, nb_nodes);
}
send_message(MSG_host_get_name(nodes[head_node]), LAUNCH_JOB, *job_idx, launchData);
send_message(MSG_host_get_name(nodes[head_node]), LAUNCH_JOB, jobID, launchData);
xbt_dynar_free(&res_dynar);
xbt_dynar_free(&job_id_res);
......@@ -538,6 +576,10 @@ int server(int argc, char *argv[])
break;
} // end of case SCHED_READY
default:
{
XBT_ERROR("Unhandled data type received (%d)", task_data->type);
}
} // end of switch (outer)
task_free(&task_received);
......@@ -558,6 +600,8 @@ int server(int argc, char *argv[])
for(i = 0; i < nb_nodes; i++)
send_message(MSG_host_get_name(nodes[i]), FINALIZE, 0, NULL);
return 0;
}
/**
......@@ -580,7 +624,7 @@ msg_error_t deploy_all(const char *platform_file)
// Let's remove the master host from the hosts used to run jobs
msg_host_t host;
int i;
unsigned int i;
xbt_dynar_foreach(all_hosts, i, host)
{
if (strcmp(MSG_host_get_name(host), masterHostName) == 0)
......@@ -656,14 +700,7 @@ int main(int argc, char *argv[])
json_decref(json_workload_profile);
// Let's clear global allocated data
xbt_dict_free(&jobs_idx2id);
xbt_dict_free(&profiles);
for (int i = 0; i < nb_jobs; ++i)
{
free(jobs[i].id_str);
//free(jobs[i]);
}
freeJobStructures();
//free(jobs);
......
......@@ -21,7 +21,7 @@ typedef enum {
*/
typedef struct s_task_data {
e_task_type_t type; // type of task
int job_idx;
int job_id;
void *data;
const char* src; // used for logging
} s_task_data_t, *task_data_t;
......
......@@ -23,8 +23,8 @@ void profile_exec(const char *profile_str, int job_id, int nb_res, msg_host_t *j
computation_amount = malloc(nb_res * sizeof(double));
communication_amount = malloc(nb_res * nb_res * sizeof(double));
double *cpu = ((msg_par_t)(profile->data))->cpu;
double *com = ((msg_par_t)(profile->data))->com;
double *cpu = ((s_msg_par_t *)(profile->data))->cpu;
double *com = ((s_msg_par_t *)(profile->data))->com;
memcpy(computation_amount , cpu, nb_res * sizeof(double));
memcpy(communication_amount, com, nb_res * nb_res * sizeof(double));
......@@ -43,8 +43,8 @@ void profile_exec(const char *profile_str, int job_id, int nb_res, msg_host_t *j
}
else if (strcmp(profile->type, "msg_par_hg") == 0)
{
double cpu = ((msg_par_hg_t)(profile->data))->cpu;
double com = ((msg_par_hg_t)(profile->data))->com;
double cpu = ((s_msg_par_hg_t *)(profile->data))->cpu;
double com = ((s_msg_par_hg_t *)(profile->data))->com;
// These amounts are deallocated by SG
computation_amount = malloc(nb_res * sizeof(double));
......@@ -73,26 +73,24 @@ void profile_exec(const char *profile_str, int job_id, int nb_res, msg_host_t *j
}
else if (strcmp(profile->type, "composed") == 0)
{
char buffer[20];
int nb = ((composed_prof_t)(profile->data))->nb;
int lg_seq = ((composed_prof_t)(profile->data))->lg_seq;
int *seq = ((composed_prof_t)(profile->data))->seq;
s_composed_prof_t * data = (s_composed_prof_t *) profile->data;
int nb = data->nb;
int lg_seq = data->lg_seq;
char **seq = data->seq;
XBT_DEBUG("composed: nb: %d, lg_seq: %d", nb, lg_seq);
for (int j = 0; j < lg_seq; j++)
for (int i = 0; i < nb; i++)
{
for (int i = 0; i < nb; i++)
for (int j = 0; j < lg_seq; j++)
{
sprintf(buffer, "%d", seq[j]);
profile_exec(buffer, job_id, nb_res, job_res, allocatedStuff);
profile_exec(seq[j], job_id, nb_res, job_res, allocatedStuff);
}
}
}
else if (strcmp(profile->type, "delay") == 0)
{
double delay = ((delay_t)(profile->data))->delay;
double delay = ((s_delay_t *)(profile->data))->delay;
MSG_process_sleep(delay);
}
......@@ -110,7 +108,7 @@ void profile_exec(const char *profile_str, int job_id, int nb_res, msg_host_t *j
* \brief Load workload with jobs' profiles file
*/
void job_exec(int job_idx, int nb_res, int *res_idxs, msg_host_t *nodes, xbt_dict_t * allocatedStuff)
void job_exec(int job_id, int nb_res, int *res_idxs, msg_host_t *nodes, xbt_dict_t * allocatedStuff)
{
int dictCreatedHere = 0;
......@@ -121,8 +119,8 @@ void job_exec(int job_idx, int nb_res, int *res_idxs, msg_host_t *nodes, xbt_dic
dictCreatedHere = 1;
}
s_job_t job = jobs[job_idx];
XBT_DEBUG("Launch_job: idx %d, id %s profile %s", job_idx, jobs[job_idx].id_str, job.profile);
s_job_t * job = jobFromJobID(job_id);
XBT_INFO("job_exec: jobID %d, job=%p", job_id, job);
msg_host_t * job_res = (msg_host_t *) malloc(nb_res * sizeof(s_msg_host_t));
xbt_dict_set(*allocatedStuff, "hosts", job_res, free);
......@@ -130,7 +128,7 @@ void job_exec(int job_idx, int nb_res, int *res_idxs, msg_host_t *nodes, xbt_dic
for(int i = 0; i < nb_res; i++)
job_res[i] = nodes[res_idxs[i]];
profile_exec(job.profile, job_idx, nb_res, job_res, allocatedStuff);
profile_exec(job->profile, job_id, nb_res, job_res, allocatedStuff);
if (dictCreatedHere)
{
......
/* Copyright (c) 2015. The OAR Team.
* All rights reserved. */
* All rights reserved. */
#pragma once
#include <sys/types.h> /* ssize_t, needed by xbt/str.h, included by msg/msg.h */
#include <msg/msg.h>
#include <msg/msg.h>
#include <xbt.h>
#include <xbt/sysdep.h>
#include <xbt/log.h>
......@@ -14,37 +14,52 @@
//XBT_LOG_NEW_DEFAULT_CATEGORY(batsim, "Batsim");
// A job, as read from the JSON workload file.
typedef struct s_job {
int id; // numerical job ID
char *id_str; // the same job ID, as a string
const char *profile; // name of the execution profile used by this job
double submission_time; // time at which the job becomes available to run
double walltime; // maximum execution time allowed for the job
double runtime; // actual execution time of the job
int nb_res; // number of resources the job asks for
} s_job_t, *job_t;
// Data of a "msg_par" profile: explicit parallel-task matrices.
typedef struct s_msg_par {
int nb_res; // number of resources the matrices below are sized for
double *cpu; // per-resource computation amounts (nb_res entries)
double *com; // pairwise communication amounts (nb_res * nb_res entries)
} s_msg_par_t, *msg_par_t;
// Data of a "msg_par_hg" (homogeneous) profile: one amount for all resources.
typedef struct s_msg_par_hg {
double cpu; // computation amount applied to every resource
double com; // communication amount applied to every resource pair
} s_msg_par_hg_t, *msg_par_hg_t;
// Data of a "composed" profile: a sequence of sub-profiles, repeated nb times.
typedef struct s_composed_prof {
int nb; // number of repetitions of the sequence
int lg_seq; // length of the sequence
int *seq; // the sub-profile sequence (lg_seq entries)
} s_composed_prof_t, *composed_prof_t;
// Data of a "delay" profile: the job just sleeps.
typedef struct s_delay {
double delay; // sleep duration in seconds
} s_delay_t, *delay_t;
// Executes one job on the given resources.
// allocatedStuff collects allocations to release later — NULL lets the callee
// manage them itself (see job.c).
void job_exec(int job_idx, int nb_res, int *res_idxs, msg_host_t *nodes, xbt_dict_t * allocatedStuff);
// The life-cycle states of a job.
typedef enum e_job_state
{
JOB_STATE_NOT_SUBMITTED //!< The job exists but cannot be scheduled yet
,JOB_STATE_SUBMITTED //!< The job has been submitted, it can now be scheduled
,JOB_STATE_RUNNING //!< The job has been scheduled and is currently being processed
,JOB_STATE_COMPLETED_SUCCESSFULLY //!< The job execution finished before its walltime
,JOB_STATE_COMPLETED_KILLED //!< The job execution time was longer than its walltime so the job had been killed
} e_job_state_t;
typedef struct s_job
{
int id; //! The job ID (as integer)
char *id_str; //! The job ID (as string)
char *profile; //! The job profile
double submission_time; //! The time at which the job is available to be scheduled
double walltime; //! The maximum execution time autorized for the job
double runtime; //! The execution time of the job
int nb_res; //! The number of resources the job asked for
e_job_state_t state; //! The state