/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors:
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: kern.c,v 1.12 2004-04-19 12:36:39 giacomo Exp $

 File:        $File$
 Revision:    $Revision: 1.12 $
 Last update: $Date: 2004-04-19 12:36:39 $
 ------------

 This file contains:

 - the kernel system variables

 - the errno functions

 - the scheduler, capacity timer, and guarantee

 - the sys_abort, sys_end, sys_gettime


**/

/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <stdarg.h>
#include <ll/ll.h>
#include <ll/stdlib.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/config.h>
#include <kernel/model.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/types.h>
#include <kernel/descr.h>
#include <errno.h>
#include <kernel/var.h>
#include <kernel/func.h>

#include <tracer.h>

/*----------------------------------------------------------------------*/
/*                      Kernel System variables                          */
/*----------------------------------------------------------------------*/

int global_errnumber;   /*+ Errno used during system initialization    +*/
CONTEXT global_context; /*+ Context used during initialization;
                            it also references a safe stack            +*/

int task_counter;       /*+ Application task counter. It represents
                            the number of application tasks in the
                            system. When all application tasks end,
                            the system ends as well.                    +*/

int system_counter;     /*+ System task counter. It represents
                            the number of system tasks in the
                            system with the NO_KILL flag reset.
                            When all application tasks end,
                            the system waits for the end of the
                            system tasks and then it ends.              +*/

PID exec;               /*+ Task advised by the scheduler               +*/
PID exec_shadow;        /*+ Currently executing task                    +*/

IQUEUE freedesc;        /*+ Free descriptors, handled as a queue        +*/

DWORD sys_tick;         /*+ System tick (in usec)                       +*/
struct timespec schedule_time;
                        /*+ Timer read at each call to schedule()       +*/

int cap_timer;          /*+ the capacity event posted when the
                            task starts                                 +*/
struct timespec cap_lasttime;
                        /*+ the time at which the capacity
                            event is posted. Normally, it is
                            equal to schedule_time                      +*/



DWORD sched_levels;     /*+ Scheduling levels active in the system      +*/
DWORD res_levels;       /*+ Resource levels active in the system        +*/

/*+ Process descriptor table +*/
proc_des proc_table[MAX_PROC];

/* Scheduling modules descriptor table */
/* ------------------------------------------------------------------------ */

/* the descriptor table */
level_des *level_table[MAX_SCHED_LEVEL];
/* ... and the size of each descriptor */
size_t level_size[MAX_SCHED_LEVEL];

/* a utilization counter, incremented when a level is used by another module */
int level_used[MAX_SCHED_LEVEL];

/* these data structures (first, last, free, next & prev)
   are used to implement a doubly linked list of scheduling modules.
   That list is used by the scheduler to call the modules' schedulers. */
int level_first;  /* first module in the list */
int level_last;   /* last module in the list */
int level_free;   /* singly linked list of free module descriptors */
int level_next[MAX_SCHED_LEVEL];
int level_prev[MAX_SCHED_LEVEL];
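
/* Illustrative sketch (not part of the original source): the scheduler walks
   this list from level_first following level_next[], so a hypothetical debug
   routine visiting every registered module could look like:

     static void example_dump_levels(void)
     {
       int l;
       for (l = level_first; l != NIL; l = level_next[l])
         kern_printf("scheduling module registered at level %d\n", l);
     }

   (example_dump_levels is hypothetical; it assumes the list is NIL-terminated.) */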
/* ------------------------------------------------------------------------ */

/*+ Resource descriptor table +*/
resource_des *resource_table[MAX_RES_LEVEL];

/*+ This variable is set by the system calls sys_end() and sys_abort().
    When sys_end() or sys_abort() is called from an event handler,
    we don't have to change context in the reschedule();
    look at kernel/event.c +*/
int mustexit = 0;

/*+ this is the system runlevel... it may be from 0 to 4:

    1 - running
    2 - shutdown
    3 - before halting
    4 - halting
+*/
int runlevel;

/*+ this variable is set to 1 in call_runlevel_func (look at init.c)
    and it is used because task_activate (look at activate.c) must
    work in a different way when the system is in the global_context +*/
int calling_runlevel_func;


/*----------------------------------------------------------------------*/
/*                      Kernel internal functions                        */
/*----------------------------------------------------------------------*/

/*+ errno handling: this function returns the correct address for errno.
    The address returned can be either the global errno or the errno local
    to the executing task */
static int *__errnumber()
{
  if (exec_shadow == -1)
    return &global_errnumber;
  else
    return &(proc_table[exec_shadow].errnumber);
}
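
/* Illustrative sketch (not in the original source): __kernel_init__() below
   installs this function with seterrnumber(__errnumber), so an error code
   written through the returned pointer ends up in global_errnumber during
   initialization (exec_shadow == -1) and in the per-task errnumber field
   afterwards.  A hypothetical helper would simply be:

     static void example_set_errno(int err)
     {
       *__errnumber() = err;
     }
*/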

/*+ this is the capacity timer. It fires when the running task has exhausted
    the time contained in its avail_time field. The event is typically
    posted in the scheduler() after the task_dispatch. The task_dispatch
    can modify the avail_time field for its own scheduling purposes.
    The wcet field is NOT used in the generic kernel; it is initialized
    to 0 at init time. +*/
void capacity_timer(void *arg)
{
  /* the capacity event is served, so at the epilogue we
     don't have to erase it */
  cap_timer = NIL;

  // kern_printf("cap%d ",exec_shadow);

  /* When we reschedule, the call to task_epilogue checks the slice and
     puts the task at the queue's tail */
  event_need_reschedule();
}
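
/* Illustrative sketch (not in the original source): the epilogue mentioned
   above is provided by each scheduling module.  A round-robin-like module
   might implement it along these lines (lev->slice, lev->ready and
   EXAMPLE_READY are hypothetical module fields/constants):

     static void example_public_epilogue(LEVEL l, PID p)
     {
       EXAMPLE_level_des *lev = (EXAMPLE_level_des *)level_table[l];

       if (proc_table[p].avail_time <= 0) {       // slice exhausted?
         proc_table[p].avail_time = lev->slice;   // recharge it
         iq_insertlast(p, &lev->ready);           // back of the ready queue
       }
       else
         iq_insertfirst(p, &lev->ready);          // keep it at the front
       proc_table[p].status = EXAMPLE_READY;
     }
*/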

/*+
  Generic Scheduler:
  This function selects the next task to be executed.
  The selection is made by calling the level schedulers.
  It assumes that THERE IS a task that can be scheduled in one
  level.

  The general scheduler:
  - first, it checks for interrupts.
  - then, it calls the epilogue of the task pointed to by exec_shadow
  - after that, it calls the level schedulers
  - then it sets exec and follows the shadow chain
  - finally it calls task_dispatch for the new task (the shadow!!!),
    saying if exec != exec_shadow

+*/
void scheduler(void)
{
  LEVEL l;            /* a counter */
  struct timespec ty; /* a dummy used for time computation */

  PID p;              /* p is the task chosen by the level scheduler */
  int ok;             /* 1 only if the task chosen by the level scheduler
                         is eligible (normally it is; but in some servers
                         it is not always true (e.g., CBS)) */

  PID old_exec_shadow;

  if ( (exec_shadow != -1 &&
        (proc_table[exec_shadow].control & NO_PREEMPT) ) )
    return;

  // kern_printf("(!");

  /*
    exec_shadow = exec = -1 only if the scheduler is called from:
    . task_endcycle
    . task_kill
    . task_extract
    . task_sleep
    . task_delay
    and from the system startup routines.

    Normally, the scheduler is called with exec & co. != -1...

    if exec & co. is set to -1 before calling scheduler(), the following
    things have to be done before the call:
    - get the schedule_time
    - account the capacity if necessary
    - call an epilogue
  */

  /* then, we call the epilogue. the epilogue typically checks the
     avail_time field... */
  if (exec_shadow != -1) {
    kern_epilogue_macro();

    l = proc_table[exec_shadow].task_level;
    level_table[l]->public_epilogue(l,exec_shadow);
  }

  // kern_printf("[");

  l = level_first;
  for(;;) {
    do {
      p = level_table[l]->public_scheduler(l);
      // kern_printf("p=%d",p);
      if (p != NIL)
        ok = level_table[ proc_table[p].task_level ]->
               public_eligible(proc_table[p].task_level,p);
      else
        ok = 0;
      // kern_printf(" ok=%d",ok);
    } while (ok < 0); /* repeat the level scheduler if the task isn't
                         eligible... (e.g., in the aperiodic servers...) */
    if (p != NIL) break;

    l = level_next[l]; /* THERE MUST BE a level with a task to schedule */
    // kern_printf(" l=%d",l);
  };

  // kern_printf("]");

  /* we follow the shadow chain */
  old_exec_shadow=exec_shadow;
  exec_shadow = exec = p;
  while (exec_shadow != proc_table[exec_shadow].shadow)
    exec_shadow = proc_table[exec_shadow].shadow;

  /* tracer stuff */
  TRACER_LOGEVENT(FTrace_EVT_task_schedule,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
  // kern_printf("[%i->%i]",old_exec_shadow,exec_shadow);

  /* we check the correctness of the shadows when we kill */
  proc_table[exec_shadow].status = EXE;

  // kern_printf("(d%d)",exec_shadow);
  l = proc_table[exec_shadow].task_level;
  level_table[l]->public_dispatch(l, exec_shadow, exec!=exec_shadow);

  // kern_printf("*");

  /* Finally, we post the capacity event, BUT
     . only if the task requires that
     . only if exec==exec_shadow (if a task is blocked we don't want
       to check the capacity!!!) */
  if ((proc_table[exec_shadow].control & CONTROL_CAP)
      && exec==exec_shadow) {
    TIMESPEC_ASSIGN(&ty, &schedule_time);
    ADDUSEC2TIMESPEC(proc_table[exec_shadow].avail_time,&ty);
    // kern_printf("|s%d ns%d sched s%d ns%d|",ty.tv_sec,ty.tv_nsec, schedule_time.tv_sec, schedule_time.tv_nsec);
    cap_timer = kern_event_post(&ty, capacity_timer, NULL);
  }
  /* set the time at which the task is scheduled */
  TIMESPEC_ASSIGN(&cap_lasttime, &schedule_time);

  // kern_printf("(s%d)",exec_shadow);
}


/*+
  Guarantee:
  This function guarantees the system: it calls the guarantee
  function of each level that has one (!= NULL).

  The guarantee is done on a utilization factor basis.
  We maintain only a DWORD; num has to be interpreted as num/MAX_DWORD
  free bandwidth.
+*/
int guarantee()
{
  bandwidth_t num=MAX_BANDWIDTH;
  int l;

  for (l =0; l<MAX_SCHED_LEVEL && level_table[l]->public_guarantee; l++)
    if (!level_table[l]->public_guarantee(l,&num))
      return -1;

  return 0; /* OK */
}
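
/* Illustrative sketch (not in the original source): a module's
   public_guarantee() is expected to reserve the bandwidth it needs from
   *freebandwidth and to return 0 when the residual bandwidth is not enough
   (which makes guarantee() above fail with -1).  With a hypothetical
   EXAMPLE_level_des holding a utilization field U:

     static int example_public_guarantee(LEVEL l, bandwidth_t *freebandwidth)
     {
       EXAMPLE_level_des *lev = (EXAMPLE_level_des *)level_table[l];

       if (*freebandwidth >= lev->U) {   // enough spare bandwidth?
         *freebandwidth -= lev->U;       // reserve it
         return 1;                       // guaranteed
       }
       return 0;                         // not schedulable
     }
*/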

/*----------------------------------------------------------------------*/
/*                 Context switch handling functions                     */
/*----------------------------------------------------------------------*/
/* this function is called every time a context change occurs,
   when a task is preempted by an event called from an IRQ */
void kern_after_dispatch()
{
  /* every time a task wakes up from an IRQ, it has to check for async
     cancellation */
  check_killed_async();

  /* Then, look for pending signal delivery */
  kern_deliver_pending_signals();
}

/*----------------------------------------------------------------------*/
/*                           Abort strings                               */
/*----------------------------------------------------------------------*/

const char *const _sys_abrtlist[] = {
  "zero - no error",
  "OSLib exception",
  "generic unhandled signal raised",
  "error in signal_init",
  "default exception handler code",
  "ARP table full"
};

/*----------------------------------------------------------------------*/
/*                    Kernel main system functions                       */
/*----------------------------------------------------------------------*/

/*+
  This function initializes
  - the virtual machine (timer, interrupts, mem)
  - the system's structures (queues, tables), and the two tasks, main
    and dummy, which are always present
+*/
void __kernel_init__(/* struct multiboot_info *multiboot */ void)
{
  int i,j;                   /* counters */

  struct ll_initparms parms; /* for the VM */

  int aborting;              /* it is set if we are aborting the system */

  struct multiboot_info *multiboot=mbi_address();



  /*
   * Runlevel 0: kernel startup
   *
   *
   */

  runlevel = RUNLEVEL_STARTUP;

  /* The kernel startup MUST proceed with interrupts disabled! */
  kern_cli();

  /* First we initialize the memory allocator, because it is needed by
     __kernel_register_levels__ */
  kern_mem_init(multiboot);

  /* Clear the task descriptors */
  for (i = 0; i < MAX_PROC; i++) {
    proc_table[i].task_level = -1;
    proc_table[i].stack = NULL;
    proc_table[i].name[0] = 0;
    proc_table[i].status = FREE;
    proc_table[i].pclass = 0;
    proc_table[i].group = 0;
    proc_table[i].stacksize = 0;
    proc_table[i].control = 0;
    proc_table[i].frozen_activations = 0;
    proc_table[i].sigmask = 0;
    proc_table[i].sigpending = 0;
    proc_table[i].avail_time = 0;
    proc_table[i].shadow = i;
    proc_table[i].cleanup_stack = NULL;
    proc_table[i].errnumber = 0;
    //proc_table[i].priority = 0;
    //NULL_TIMESPEC(&proc_table[i].timespec_priority);
    proc_table[i].delay_timer = -1;
    proc_table[i].wcet = -1;

    proc_table[i].jet_tvalid = 0;
    proc_table[i].jet_curr = 0;
    proc_table[i].jet_max = 0;
    proc_table[i].jet_sum = 0;
    proc_table[i].jet_n = 0;
    for (j=0; j<JET_TABLE_DIM; j++)
      proc_table[i].jet_table[j] = 0;

    proc_table[i].waiting_for_me = NIL;
    proc_table[i].return_value = NULL;

    for (j=0; j<PTHREAD_KEYS_MAX; j++)
      proc_table[i].keys[j] = NULL;
  }

  /* set up the free descriptor queue */
  // for (i = 0; i < MAX_PROC-1; i++) proc_table[i].next = i+1;
  // proc_table[MAX_PROC-1].next = NIL;
  // for (i = MAX_PROC-1; i > 0; i--) proc_table[i].prev = i-1;
  // proc_table[0].prev = NIL;
  // freedesc = 0;
  iq_init(&freedesc, NULL, 0);
  for (i = 0; i < MAX_PROC; i++)
    iq_insertlast(i,&freedesc);
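
  /* Illustrative sketch (not in the original source): task creation would
     typically draw a free descriptor from this queue and task destruction
     would put it back, roughly as in:

       PID p = iq_getfirst(&freedesc);  // NIL when no descriptor is free
       ...
       iq_insertfirst(p, &freedesc);    // release the descriptor again
  */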

  /* Set up the various stuff */
  global_errnumber      = 0;
  task_counter          = 0;
  system_counter        = 0;
  exec                  = -1;
  exec_shadow           = -1;
  cap_timer             = -1;
  NULL_TIMESPEC(&cap_lasttime);
  sched_levels          = 0;    /* They are not registered yet... */
  res_levels            = 0;
  calling_runlevel_func = 0;

  /* Clear the key-specific data */
  task_specific_data_init();

  /* Clear exit and init functions */
  runlevel_init();

  /* Init VM layer (Interrupts, levels & memory management) */
  /* for old exception handling, use excirq_init() */
  signals_init();
  set_default_exception_handler();

  /* Clear scheduling modules registration data */
  levels_init();

  sys_tick = __kernel_register_levels__(multiboot);

  /* test on system tick */
  if (sys_tick>=55000) {
    printk("The system tick must be less than 55 mSec!");
    l1_exit(0);
  }

  /* OSLib initialization */
  if (sys_tick)
    parms.mode = LL_PERIODIC;
  else
    parms.mode = LL_ONESHOT; // one shot!!!

  parms.tick = sys_tick;

  /*
   * Runlevel INIT: Let's go!!!!
   *
   *
   */

  runlevel = RUNLEVEL_INIT;

  ll_init();
  event_init(&parms);
  seterrnumber(__errnumber);
  event_setprologue(event_resetepilogue);
  event_setlasthandler(kern_after_dispatch);

  /* call the init functions */
  call_runlevel_func(RUNLEVEL_INIT, 0);




  /*
   * Runlevel RUNNING: Hoping that all works fine ;-)
   *
   *
   */

  runlevel = RUNLEVEL_RUNNING;

  /* tracer stuff */
#ifdef __OLD_TRACER__
  trc_resume();
#endif

  /* exec and exec_shadow are already = -1 */
  kern_gettime(&schedule_time);
  scheduler();
  global_context = ll_context_from(); /* It will be used by sys_end */
  ll_context_to(proc_table[exec_shadow].context);

  /*
   *
   * Now the system starts!!!
   * (hoping that someone has created some task(s) )
   * The function returns only at system end...
   *
   */


  /*
   * Runlevel SHUTDOWN: Shutting down the system... :-(
   *
   *
   */

  event_setlasthandler(NULL);

  // ll_abort(666);
  /* tracer stuff */
#ifdef __OLD_TRACER__
  trc_suspend();
#endif

  remove_default_exception_handler();

  runlevel = RUNLEVEL_SHUTDOWN;

  /* 1 when the error code is != 0 */
  aborting = global_errnumber > 0;

  //kern_printf("after - system_counter=%d, task_counter = %d\n", system_counter,task_counter);

  call_runlevel_func(RUNLEVEL_SHUTDOWN, aborting);

  //kern_printf("before - system_counter=%d, task_counter = %d\n", system_counter,task_counter);

  if (system_counter) {
    /* To shut down the kernel correctly, we have to wait until all the
       killable SYSTEM tasks have died...

       We don't mess with the user tasks... we only kill them and reschedule.
       The only important thing is that the system tasks shut down correctly.
       We do nothing for user tasks that remain active (because, for example,
       they have their cancelability set to deferred) when the system goes to
       runlevel 3 */

    //kern_printf("|%lu",kern_gettime(NULL));
    kill_user_tasks();
    //kern_printf("|%lu",kern_gettime(NULL));

    /* we have to go again into multitasking mode!!! */
    mustexit = 0;

    /* exec and exec_shadow are already = -1 */
    kern_gettime(&schedule_time);
    global_context = ll_context_from(); /* It will be used by sys_end */
    scheduler();

    event_setlasthandler(kern_after_dispatch);
    ll_context_to(proc_table[exec_shadow].context);
    event_setlasthandler(NULL);
  }

  /*
   * Runlevel BEFORE_EXIT: Before halting the system
   *
   *
   */

  runlevel = RUNLEVEL_BEFORE_EXIT;

  /* the field global_errnumber is
     =0  if the system ends normally
     !=0 if an abort was issued
  */

  //kern_printf("Chiamo exit Functions\n");

  call_runlevel_func(RUNLEVEL_BEFORE_EXIT, aborting);

  //kern_printf("Dopo exit Functions\n");

  /* Shut down the VM layer */
  ll_end();

  /*
   * Runlevel AFTER_EXIT: After halting...
   *
   *
   */

  runlevel = RUNLEVEL_AFTER_EXIT;

  //kern_printf("prima before Functions\n");

  call_runlevel_func(RUNLEVEL_AFTER_EXIT, 0);

  //kern_printf("dopo before Functions\n");
  kern_cli();
  if (global_errnumber) {
    /* vm_abort called */
    kern_printf("Abort detected\nCode : %u (%s)\n",global_errnumber,
       global_errnumber<=LAST_ABORT_NUMBER ? _sys_abrtlist[global_errnumber] : "no description" );
    l1_exit(-1);
  }

  l1_exit(0); // System terminated normally

}

/* IMPORTANT!!!
   I'm almost sure the shutdown procedure does not work from within interrupts. */
void internal_sys_end(int i)
{
  LEVEL l; /* a counter */

  /* if something goes wrong while in real mode */
  if (runlevel==RUNLEVEL_STARTUP || runlevel==RUNLEVEL_AFTER_EXIT)
    l1_exit(i);

  //kern_printf("mustexit=%d",mustexit);
  if (mustexit)
    return;

  mustexit = 1;

  global_errnumber = i;

  if (!ll_ActiveInt()) {
    proc_table[exec_shadow].context = kern_context_save();

    if (exec_shadow != -1) {
      kern_gettime(&schedule_time);

      kern_epilogue_macro();

      /* then, we call the epilogue. the epilogue typically checks the
         avail_time field... */
      l = proc_table[exec_shadow].task_level;
      level_table[l]->public_epilogue(l,exec_shadow);

      exec_shadow = exec = -1;
    }
    kern_context_load(global_context);
  }

  if (ll_ActiveInt()) {
    ll_context_to(global_context);
    /* The context change will be done when all the interrupts end!!! */
  }

  //kern_printf("fine sysend");

  /* the control reaches this line only if we call sys_end() from an event
     handler (for example, if the event raises an exception with
     SA_USEFAST active and the exception calls sys_end() ) */
}


/*
  Close the system & return to the HOST OS.
  Can be called from tasks and from ISRs,
  but only during runlevel RUNNING

*/
void sys_abort(int err)
{
  SYS_FLAGS f;

  /* Check if the system is in RUNNING mode */
  if (runlevel != RUNLEVEL_RUNNING) return;

  f = kern_fsave();
  internal_sys_end(err);
  kern_frestore(f);

}

/* Close the system when we are in runlevel SHUTDOWN */
void sys_abort_shutdown(int err)
{
  SYS_FLAGS f;

  /* Check if the system is in SHUTDOWN mode */
  if (runlevel != RUNLEVEL_SHUTDOWN) return;

  f = kern_fsave();
  internal_sys_end(err);
  kern_frestore(f);

}


void sys_end(void)
{
  sys_abort(0);
}

void _exit(int status)
{
  sys_abort(status);
}



/* this function is never called... it is only used by the OSLib */
void sys_abort_tail(int code)
{
  //DUMMY!!!!
}



/*+ this primitive returns the time read from the system timer +*/
TIME sys_gettime(struct timespec *t)
{
  SYS_FLAGS f;
  TIME x;

  f = kern_fsave();
  x = kern_gettime(t);
  kern_frestore(f);

  return x;
}
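
/* Illustrative usage sketch (not in the original source): sys_gettime() can
   be used to measure an interval; assuming, as the sys_tick comment above
   suggests, that TIME counts microseconds (do_some_work() is a hypothetical
   workload):

     void example_measure(void)
     {
       TIME t0, t1;

       t0 = sys_gettime(NULL);
       do_some_work();
       t1 = sys_gettime(NULL);
       kern_printf("elapsed: %lu usec\n", (unsigned long)(t1 - t0));
     }
*/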
760 |