Add comments and disable verbose outputs
[simgrid.git] / src / surf / cpu_cas01.c
1 /* Copyright (c) 2009-2011. The SimGrid Team.
2  * All rights reserved.                                                     */
3
4 /* This program is free software; you can redistribute it and/or modify it
5  * under the terms of the license (GNU LGPL) which comes with this package. */
6
7 #include "surf_private.h"
8 #include "surf/surf_resource.h"
9 #include "maxmin_private.h"
10 #include "simgrid/sg_config.h"
11 #include "surf/cpu_cas01_private.h"
12
13 /* the model objects for physical machines and virtual machines */
14 surf_model_t surf_cpu_model_pm = NULL;
15 surf_model_t surf_cpu_model_vm = NULL;
16
17 #undef GENERIC_LMM_ACTION
18 #undef GENERIC_ACTION
19 #undef ACTION_GET_CPU
20 #define GENERIC_LMM_ACTION(action) action->generic_lmm_action
21 #define GENERIC_ACTION(action) GENERIC_LMM_ACTION(action).generic_action
22 #define ACTION_GET_CPU(action) ((surf_action_cpu_Cas01_t) action)->cpu
23
24 typedef struct surf_action_cpu_cas01 {
25   s_surf_action_lmm_t generic_lmm_action;
26 } s_surf_action_cpu_Cas01_t, *surf_action_cpu_Cas01_t;
27
28 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_cpu, surf,
29                                 "Logging specific to the SURF CPU IMPROVED module");
30
31 static xbt_swag_t
32     cpu_running_action_set_that_does_not_need_being_checked = NULL;
33
34
35 void *cpu_cas01_create_resource(const char *name, double power_peak,
36                                  double power_scale,
37                                  tmgr_trace_t power_trace,
38                                  int core,
39                                  e_surf_resource_state_t state_initial,
40                                  tmgr_trace_t state_trace,
41                                  xbt_dict_t cpu_properties,
42                                  surf_model_t cpu_model)
43 {
44   cpu_Cas01_t cpu = NULL;
45
46   xbt_assert(!surf_cpu_resource_priv(surf_cpu_resource_by_name(name)),
47              "Host '%s' declared several times in the platform file",
48              name);
49   cpu = (cpu_Cas01_t) surf_resource_new(sizeof(s_cpu_Cas01_t),
50                                         cpu_model, name,
51                                         cpu_properties);
52   cpu->power_peak = power_peak;
53   xbt_assert(cpu->power_peak > 0, "Power has to be >0");
54   cpu->power_scale = power_scale;
55   cpu->core = core;
56   xbt_assert(core > 0, "Invalid number of cores %d", core);
57
58   if (power_trace)
59     cpu->power_event =
60         tmgr_history_add_trace(history, power_trace, 0.0, 0, cpu);
61
62   cpu->state_current = state_initial;
63   if (state_trace)
64     cpu->state_event =
65         tmgr_history_add_trace(history, state_trace, 0.0, 0, cpu);
66
67   cpu->constraint =
68       lmm_constraint_new(cpu_model->model_private->maxmin_system, cpu,
69                          cpu->core * cpu->power_scale * cpu->power_peak);
70
71   /* Note (hypervisor): we create a constraint object for each CPU core, which
72    * is used to formulate the constraint problem of CPU affinity.
73    **/
74   {
75     /* For now, we assume that a VM does not have a multicore CPU. */
76     if (core > 1)
77       xbt_assert(cpu_model == surf_cpu_model_pm);
78
79     cpu->constraint_core = xbt_new(lmm_constraint_t, core);
80
81     unsigned long i;
82     for (i = 0; i < core; i++) {
83       /* just for a unique id, never used as a string. */
84       void *cnst_id = bprintf("%s:%lu", name, i);
85       cpu->constraint_core[i] =
86         lmm_constraint_new(cpu_model->model_private->maxmin_system, cnst_id,
87             cpu->power_scale * cpu->power_peak);
88     }
89   }
90
91   xbt_lib_set(host_lib, name, SURF_CPU_LEVEL, cpu);
92
93   return xbt_lib_get_elm_or_null(host_lib, name);
94 }
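
/* Editor's note (illustrative, not part of the original file): the value
 * stored in host_lib above is the host-level element; the private
 * cpu_Cas01_t data can be fetched again by name with the same helpers used
 * in this file, e.g. (with a hypothetical host name "host1"):
 *
 *   cpu_Cas01_t c = surf_cpu_resource_priv(surf_cpu_resource_by_name("host1"));
 */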
95
96
97 static void parse_cpu_init(sg_platf_host_cbarg_t host)
98 {
99   /* This function is called when a platform file is parsed. Physical machines
100    * are defined there. Thus, we use the cpu model object for the physical
101    * machine layer. */
102   cpu_cas01_create_resource(host->id,
103                       host->power_peak,
104                       host->power_scale,
105                       host->power_trace,
106                       host->core_amount,
107                       host->initial_state,
108                       host->state_trace, host->properties,
109                       surf_cpu_model_pm);
110 }
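
/* Editor's note (illustrative, not part of the original file): parse_cpu_init()
 * is invoked for each <host> tag of the platform file. Assuming the usual
 * SimGrid 3.x platform syntax, a matching declaration would look like:
 *
 *   <host id="host1" power="1E9" core="2" availability_file="host1.trace"/>
 *
 * where "host1" and the attribute values are hypothetical.
 */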
111
112 static void cpu_add_traces_cpu(void)
113 {
114   xbt_dict_cursor_t cursor = NULL;
115   char *trace_name, *elm;
116   static int called = 0;
117   if (called)
118     return;
119   called = 1;
120
121   /* connect all traces relative to hosts */
122   xbt_dict_foreach(trace_connect_list_host_avail, cursor, trace_name, elm) {
123     tmgr_trace_t trace = xbt_dict_get_or_null(traces_set_list, trace_name);
124     cpu_Cas01_t host = surf_cpu_resource_by_name(elm);
125
126     xbt_assert(host, "Host %s undefined", elm);
127     xbt_assert(trace, "Trace %s undefined", trace_name);
128
129     host->state_event =
130         tmgr_history_add_trace(history, trace, 0.0, 0, host);
131   }
132
133   xbt_dict_foreach(trace_connect_list_power, cursor, trace_name, elm) {
134     tmgr_trace_t trace = xbt_dict_get_or_null(traces_set_list, trace_name);
135     cpu_Cas01_t host = surf_cpu_resource_by_name(elm);
136
137     xbt_assert(host, "Host %s undefined", elm);
138     xbt_assert(trace, "Trace %s undefined", trace_name);
139
140     host->power_event =
141         tmgr_history_add_trace(history, trace, 0.0, 0, host);
142   }
143 }
144
145 static void cpu_define_callbacks_cas01()
146 {
147   sg_platf_host_add_cb(parse_cpu_init);
148   sg_platf_postparse_add_cb(cpu_add_traces_cpu);
149 }
150
151 static int cpu_resource_used(void *resource)
152 {
153   surf_model_t cpu_model = ((surf_resource_t) resource)->model;
154
155   /* Note (hypervisor): we do not need to look up constraint_core[i] here.
156    * Whether a task is pinned or not, its variable object is always linked to
157    * the basic constraint object.
158    **/
159
160   return lmm_constraint_used(cpu_model->model_private->maxmin_system,
161                              ((cpu_Cas01_t) resource)->constraint);
162 }
163
164 static double cpu_share_resources_lazy(surf_model_t cpu_model, double now)
165 {
166   return generic_share_resources_lazy(now, cpu_model);
167 }
168
169 static double cpu_share_resources_full(surf_model_t cpu_model, double now)
170 {
171   s_surf_action_cpu_Cas01_t action;
172   return generic_maxmin_share_resources(cpu_model->states.
173                                         running_action_set,
174                                         xbt_swag_offset(action,
175                                                         generic_lmm_action.
176                                                         variable),
177                                         cpu_model->model_private->maxmin_system, lmm_solve);
178 }
179
180 static void cpu_update_actions_state_lazy(surf_model_t cpu_model, double now, double delta)
181 {
182   generic_update_actions_state_lazy(now, delta, cpu_model);
183 }
184
185 static void cpu_update_actions_state_full(surf_model_t cpu_model, double now, double delta)
186 {
187   generic_update_actions_state_full(now, delta, cpu_model);
188 }
189
190 static void cpu_update_resource_state(void *id,
191                                       tmgr_trace_event_t event_type,
192                                       double value, double date)
193 {
194   cpu_Cas01_t cpu = id;
195   lmm_variable_t var = NULL;
196   lmm_element_t elem = NULL;
197   surf_model_t cpu_model = ((surf_resource_t) cpu)->model;
198
199   surf_watched_hosts();
200
201   if (event_type == cpu->power_event) {
202     /* TODO (Hypervisor): do the same thing for constraint_core[i] */
203     xbt_assert(cpu->core == 1, "FIXME: add power scaling code also for constraint_core[i]");
204
205     cpu->power_scale = value;
206     lmm_update_constraint_bound(cpu_model->model_private->maxmin_system, cpu->constraint,
207                                 cpu->core * cpu->power_scale *
208                                 cpu->power_peak);
209 #ifdef HAVE_TRACING
210     TRACE_surf_host_set_power(date, cpu->generic_resource.name,
211                               cpu->core * cpu->power_scale *
212                               cpu->power_peak);
213 #endif
214     while ((var = lmm_get_var_from_cnst
215             (cpu_model->model_private->maxmin_system, cpu->constraint, &elem))) {
216       surf_action_cpu_Cas01_t action = lmm_variable_id(var);
217       lmm_update_variable_bound(cpu_model->model_private->maxmin_system,
218                                 GENERIC_LMM_ACTION(action).variable,
219                                 cpu->power_scale * cpu->power_peak);
220     }
221     if (tmgr_trace_event_free(event_type))
222       cpu->power_event = NULL;
223   } else if (event_type == cpu->state_event) {
224     /* TODO (Hypervisor): do the same thing for constraint_core[i] */
225     xbt_assert(cpu->core == 1, "FIXME: add state change code also for constraint_core[i]");
226
227     if (value > 0)
228       cpu->state_current = SURF_RESOURCE_ON;
229     else {
230       lmm_constraint_t cnst = cpu->constraint;
231
232       cpu->state_current = SURF_RESOURCE_OFF;
233
234       while ((var = lmm_get_var_from_cnst(cpu_model->model_private->maxmin_system, cnst, &elem))) {
235         surf_action_t action = lmm_variable_id(var);
236
237         if (surf_action_state_get(action) == SURF_ACTION_RUNNING ||
238             surf_action_state_get(action) == SURF_ACTION_READY ||
239             surf_action_state_get(action) ==
240             SURF_ACTION_NOT_IN_THE_SYSTEM) {
241           action->finish = date;
242           surf_action_state_set(action, SURF_ACTION_FAILED);
243         }
244       }
245     }
246     if (tmgr_trace_event_free(event_type))
247       cpu->state_event = NULL;
248   } else {
249     XBT_CRITICAL("Unknown event!");
250     xbt_abort();
251   }
252
253   return;
254 }
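
/* Editor's note (illustrative, not part of the original file): the power and
 * state traces dispatched to cpu_update_resource_state() are ordinary tmgr
 * trace files. Assuming the usual SimGrid trace syntax, an availability trace
 * such as
 *
 *   PERIODICITY 1.0
 *   0.0 1.0
 *   0.5 0.5
 *
 * would trigger this function with value 1.0 at t=0 and 0.5 at t=0.5, and the
 * constraint bound would be rescaled accordingly.
 */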
255
256
257 /*
258  *
259  * This function formulates a constraint problem that pins a given task to
260  * particular cores. Currently, it is possible to pin a task to exactly one
261  * specific core. The system links the variable object of the task to the
262  * per-core constraint object.
263  *
264  * However, the taskset command on Linux takes a mask value specifying the CPU
265  * affinity setting of a given task. If the mask value is 0x03, the given task
266  * will be executed on the first core (CPU0) or the second core (CPU1) on the
267  * given PM. The scheduler will determine appropriate placements of tasks,
268  * considering given CPU affinities and task activities.
269  *
270  * How should the system formulate constraint problems for an affinity to
271  * multiple cores?
272  *
273  * The cpu argument must be the host where the task is being executed. The
274  * action object does not have the information about the location where the
275  * action is being executed.
276  */
277 static void cpu_action_set_affinity(surf_action_t action, void *cpu, unsigned long mask)
278 {
279   lmm_variable_t var_obj = ((surf_action_lmm_t) action)->variable;
280
281   surf_model_t cpu_model = action->model_obj;
282   xbt_assert(cpu_model->type == SURF_MODEL_TYPE_CPU);
283   cpu_Cas01_t CPU = surf_cpu_resource_priv(cpu);
284
285   XBT_IN("(%p,%lx)", action, mask);
286
287   {
288     unsigned long nbits = 0;
289
290     /* FIXME: There are much faster algorithms for doing this. */
291     unsigned long i;
292     for (i = 0; i < CPU->core; i++) {
293       unsigned long has_affinity = (1UL << i) & mask;
294       if (has_affinity)
295         nbits += 1;
296     }
297
298     if (nbits > 1) {
299       XBT_CRITICAL("Do not specify multiple cores for an affinity mask.");
300       XBT_CRITICAL("See the comment in cpu_action_set_affinity().");
301       DIE_IMPOSSIBLE;
302     }
303   }
304
305
306
307   unsigned long i;
308   for (i = 0; i < CPU->core; i++) {
309     XBT_DEBUG("clear affinity %p to cpu-%lu@%s", action, i, CPU->generic_resource.name);
310     lmm_shrink(cpu_model->model_private->maxmin_system, CPU->constraint_core[i], var_obj);
311
312     unsigned long has_affinity = (1UL << i) & mask;
313     if (has_affinity) {
314       /* This function only accepts an affinity setting on the host where the
315        * task is now running. In the future, a task might move to another host,
316        * but at this moment this function cannot take an affinity setting on
317        * that future host.
318        *
319        * It might be possible to extend the code to allow this function to
320        * accept affinity settings on a future host. We might be able to assign
321        * zero to elem->value to maintain such inactive affinity settings in the
322        * system. But, this will make the system complex. */
323       XBT_DEBUG("set affinity %p to cpu-%lu@%s", action, i, CPU->generic_resource.name);
324       lmm_expand(cpu_model->model_private->maxmin_system, CPU->constraint_core[i], var_obj, 1.0);
325     }
326   }
327
328   if (cpu_model->model_private->update_mechanism == UM_LAZY) {
329     /* FIXME (hypervisor): Do we need to do something for the LAZY mode? */
330   }
331
332   XBT_OUT();
333 }
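
/* Editor's illustration (not part of the original file): a minimal sketch of
 * how a caller could pin an action to a single core through the model
 * interface wired up in surf_cpu_model_init_cas01() below. The host name and
 * the mask value are hypothetical; masks with more than one bit set abort, as
 * explained above. Kept in #if 0 so it is never compiled. */
#if 0
static void example_pin_action_to_core_1(void)
{
  void *host = surf_cpu_resource_by_name("host1");
  /* create a 1 Gflop execution action on that host */
  surf_action_t a = surf_cpu_model_pm->extension.cpu.execute(host, 1e9);
  /* mask 0x2 selects core 1 only */
  surf_cpu_model_pm->set_affinity(a, host, 0x2);
}
#endif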
334
335 static surf_action_t cpu_execute(void *cpu, double size)
336 {
337   surf_action_cpu_Cas01_t action = NULL;
338   cpu_Cas01_t CPU = surf_cpu_resource_priv(cpu);
339   surf_model_t cpu_model = ((surf_resource_t) CPU)->model;
340
341   XBT_IN("(%s,%g)", surf_resource_name(CPU), size);
342   action =
343       surf_action_new(sizeof(s_surf_action_cpu_Cas01_t), size,
344                       cpu_model,
345                       CPU->state_current != SURF_RESOURCE_ON);
346
347   GENERIC_LMM_ACTION(action).suspended = 0;     /* Should be useless because of the
348                                                    calloc but it seems to help valgrind... */
349
350   /* Note (hypervisor): here, the bound value of the variable is set to the
351    * capacity of a CPU core. However, after MSG_{task/vm}_set_bound() were added to
352    * the hypervisor branch, this bound value is overwritten in
353    * SIMIX_host_execute().
354    * TODO: cleanup this.
355    */
356   GENERIC_LMM_ACTION(action).variable =
357       lmm_variable_new(cpu_model->model_private->maxmin_system, action,
358                        GENERIC_ACTION(action).priority,
359                        CPU->power_scale * CPU->power_peak, 1 + CPU->core); // the basic constraint plus core-specific constraints
360   if (cpu_model->model_private->update_mechanism == UM_LAZY) {
361     GENERIC_LMM_ACTION(action).index_heap = -1;
362     GENERIC_LMM_ACTION(action).last_update = surf_get_clock();
363     GENERIC_LMM_ACTION(action).last_value = 0.0;
364   }
365   lmm_expand(cpu_model->model_private->maxmin_system, CPU->constraint,
366              GENERIC_LMM_ACTION(action).variable, 1.0);
367   XBT_OUT();
368   return (surf_action_t) action;
369 }
370
371 static surf_action_t cpu_action_sleep(void *cpu, double duration)
372 {
373   surf_action_cpu_Cas01_t action = NULL;
374   cpu_Cas01_t CPU = surf_cpu_resource_priv(cpu);
375   surf_model_t cpu_model = ((surf_resource_t) CPU)->model;
376
377   if (duration > 0)
378     duration = MAX(duration, MAXMIN_PRECISION);
379
380   XBT_IN("(%s,%g)", surf_resource_name(surf_cpu_resource_priv(cpu)), duration);
381   action = (surf_action_cpu_Cas01_t) cpu_execute(cpu, 1.0);
382   // FIXME: sleep variables should not consume 1.0 in lmm_expand
383   GENERIC_ACTION(action).max_duration = duration;
384   GENERIC_LMM_ACTION(action).suspended = 2;
385   if (duration == NO_MAX_DURATION) {
386     /* Move to the *end* of the corresponding action set. This convention
387        is used to speed up update_resource_state  */
388     xbt_swag_remove(action, ((surf_action_t) action)->state_set);
389     ((surf_action_t) action)->state_set =
390         cpu_running_action_set_that_does_not_need_being_checked;
391     xbt_swag_insert(action, ((surf_action_t) action)->state_set);
392   }
393
394   lmm_update_variable_weight(cpu_model->model_private->maxmin_system,
395                              GENERIC_LMM_ACTION(action).variable, 0.0);
396   if (cpu_model->model_private->update_mechanism == UM_LAZY) {     // remove action from the heap
397     surf_action_lmm_heap_remove(cpu_model->model_private->action_heap,(surf_action_lmm_t)action);
398     // this is necessary for a variable with weight 0 since such
399     // variables are ignored in lmm and we need to set its max_duration
400     // correctly at the next call to share_resources
401     xbt_swag_insert_at_head(action, cpu_model->model_private->modified_set);
402   }
403
404   XBT_OUT();
405   return (surf_action_t) action;
406 }
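
/* Editor's illustration (not part of the original file): sleep actions go
 * through the same extension interface, e.g. a 10-second sleep on a
 * hypothetical "host1":
 *
 *   surf_action_t s =
 *       surf_cpu_model_pm->extension.cpu.sleep(surf_cpu_resource_by_name("host1"), 10.0);
 */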
407
408 static e_surf_resource_state_t cpu_get_state(void *cpu)
409 {
410   return ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->state_current;
411 }
412
413 static void cpu_set_state(void *cpu, e_surf_resource_state_t state)
414 {
415   ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->state_current = state;
416 }
417
418 static double cpu_get_speed(void *cpu, double load)
419 {
420   return load * ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->power_peak;
421 }
422
423 static int cpu_get_core(void *cpu)
424 {
425   return ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->core;
426 }
427 static double cpu_get_available_speed(void *cpu)
428 {
429   /* number between 0 and 1 */
430   return ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->power_scale;
431 }
432
433 static void cpu_finalize(surf_model_t cpu_model)
434 {
435   lmm_system_free(cpu_model->model_private->maxmin_system);
436   cpu_model->model_private->maxmin_system = NULL;
437
438   if (cpu_model->model_private->action_heap)
439     xbt_heap_free(cpu_model->model_private->action_heap);
440   xbt_swag_free(cpu_model->model_private->modified_set);
441
442   surf_model_exit(cpu_model);
443   cpu_model = NULL;
444
445   xbt_swag_free(cpu_running_action_set_that_does_not_need_being_checked);
446   cpu_running_action_set_that_does_not_need_being_checked = NULL;
447 }
448
449 static surf_model_t surf_cpu_model_init_cas01(void)
450 {
451   s_surf_action_t action;
452   s_surf_action_cpu_Cas01_t comp;
453
454   char *optim = xbt_cfg_get_string(_sg_cfg_set, "cpu/optim");
455   int select =
456       xbt_cfg_get_boolean(_sg_cfg_set, "cpu/maxmin_selective_update");
457
458   surf_model_t cpu_model = surf_model_init();
459
460   if (!strcmp(optim, "Full")) {
461     cpu_model->model_private->update_mechanism = UM_FULL;
462     cpu_model->model_private->selective_update = select;
463   } else if (!strcmp(optim, "Lazy")) {
464     cpu_model->model_private->update_mechanism = UM_LAZY;
465     cpu_model->model_private->selective_update = 1;
466     xbt_assert((select == 1)
467                ||
468                (xbt_cfg_is_default_value
469                 (_sg_cfg_set, "cpu/maxmin_selective_update")),
470                "Disabling selective update while using the lazy update mechanism is dumb!");
471   } else {
472     xbt_die("Unsupported optimization (%s) for this model", optim);
473   }
474
475   cpu_running_action_set_that_does_not_need_being_checked =
476       xbt_swag_new(xbt_swag_offset(action, state_hookup));
477
478   cpu_model->name = "cpu";
479   cpu_model->type = SURF_MODEL_TYPE_CPU;
480
481   cpu_model->action_unref = surf_action_unref;
482   cpu_model->action_cancel = surf_action_cancel;
483   cpu_model->action_state_set = surf_action_state_set;
484
485   cpu_model->model_private->resource_used = cpu_resource_used;
486
487   if (cpu_model->model_private->update_mechanism == UM_LAZY) {
488     cpu_model->model_private->share_resources =
489         cpu_share_resources_lazy;
490     cpu_model->model_private->update_actions_state =
491         cpu_update_actions_state_lazy;
492   } else if (cpu_model->model_private->update_mechanism == UM_FULL) {
493     cpu_model->model_private->share_resources =
494         cpu_share_resources_full;
495     cpu_model->model_private->update_actions_state =
496         cpu_update_actions_state_full;
497   } else
498     xbt_die("Invalid cpu update mechanism!");
499
500   cpu_model->model_private->update_resource_state =
501       cpu_update_resource_state;
502   cpu_model->model_private->finalize = cpu_finalize;
503
504   cpu_model->suspend = surf_action_suspend;
505   cpu_model->resume = surf_action_resume;
506   cpu_model->is_suspended = surf_action_is_suspended;
507   cpu_model->set_max_duration = surf_action_set_max_duration;
508   cpu_model->set_priority = surf_action_set_priority;
509   cpu_model->set_bound = surf_action_set_bound;
510   cpu_model->set_affinity = cpu_action_set_affinity;
511 #ifdef HAVE_TRACING
512   cpu_model->set_category = surf_action_set_category;
513 #endif
514   cpu_model->get_remains = surf_action_get_remains;
515
516   cpu_model->extension.cpu.execute = cpu_execute;
517   cpu_model->extension.cpu.sleep = cpu_action_sleep;
518
519   cpu_model->extension.cpu.get_state = cpu_get_state;
520   cpu_model->extension.cpu.set_state = cpu_set_state;
521   cpu_model->extension.cpu.get_core = cpu_get_core;
522   cpu_model->extension.cpu.get_speed = cpu_get_speed;
523   cpu_model->extension.cpu.get_available_speed =
524       cpu_get_available_speed;
525   cpu_model->extension.cpu.add_traces = cpu_add_traces_cpu;
526
527   if (!cpu_model->model_private->maxmin_system) {
528     cpu_model->model_private->maxmin_system = lmm_system_new(cpu_model->model_private->selective_update);
529   }
530   if (cpu_model->model_private->update_mechanism == UM_LAZY) {
531     cpu_model->model_private->action_heap = xbt_heap_new(8, NULL);
532     xbt_heap_set_update_callback(cpu_model->model_private->action_heap,
533         surf_action_lmm_update_index_heap);
534     cpu_model->model_private->modified_set =
535         xbt_swag_new(xbt_swag_offset(comp, generic_lmm_action.action_list_hookup));
536     cpu_model->model_private->maxmin_system->keep_track = cpu_model->model_private->modified_set;
537   }
538
539   return cpu_model;
540 }
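
/* Editor's note (illustrative, not part of the original file): "cpu/optim" and
 * "cpu/maxmin_selective_update" read above are regular SimGrid configuration
 * items, so the update mechanism can be chosen from the command line, e.g.
 * (program and platform file names are hypothetical):
 *
 *   ./my_simulator platform.xml --cfg=cpu/optim:Lazy
 *   ./my_simulator platform.xml --cfg=cpu/optim:Full --cfg=cpu/maxmin_selective_update:yes
 */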
541
542 /*********************************************************************/
543 /* Basic sharing model for CPU: that is where all this started... ;) */
544 /*********************************************************************/
545 /* @InProceedings{casanova01simgrid, */
546 /*   author =       "H. Casanova", */
547 /*   booktitle =    "Proceedings of the IEEE Symposium on Cluster Computing */
548 /*                  and the Grid (CCGrid'01)", */
549 /*   publisher =    "IEEE Computer Society", */
550 /*   title =        "Simgrid: {A} Toolkit for the Simulation of Application */
551 /*                  Scheduling", */
552 /*   year =         "2001", */
553 /*   month =        may, */
554 /*   note =         "Available at */
555 /*                  \url{http://grail.sdsc.edu/papers/simgrid_ccgrid01.ps.gz}." */
556 /* } */
557
558
559 void surf_cpu_model_init_Cas01(void)
560 {
561   char *optim = xbt_cfg_get_string(_sg_cfg_set, "cpu/optim");
562
563   xbt_assert(!surf_cpu_model_pm);
564   xbt_assert(!surf_cpu_model_vm);
565
566   if (strcmp(optim, "TI") == 0) {
567     /* FIXME: do we have to support TI for VMs? */
568     surf_cpu_model_pm = surf_cpu_model_init_ti();
569     XBT_INFO("TI model is used (it will crash since this is the hypervisor branch)");
570   } else {
571     surf_cpu_model_pm  = surf_cpu_model_init_cas01();
572     surf_cpu_model_vm  = surf_cpu_model_init_cas01();
573
574     /* cpu_model is registered only to model_list, and not to
575      * model_list_invoke. The share_resources callback function will be called
576      * from that of the workstation model. */
577     xbt_dynar_push(model_list, &surf_cpu_model_pm);
578     xbt_dynar_push(model_list, &surf_cpu_model_vm);
579
580     cpu_define_callbacks_cas01();
581   }
582 }
583
584 /* TODO: do we address nested virtualization later? */
585 #if 0
586 surf_model_t cpu_model_cas01(int level){
587         // TODO this table should be allocated
588         if(!surf_cpu_model[level])
589         if (!surf_cpu_model[level])
590           ; // TODO: allocate it here (placeholder, so the return below stays unconditional)
591 }
592 #endif