/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors     :
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: kern.c,v 1.4 2003-01-30 09:56:51 pj Exp $

 File:        $File$
 Revision:    $Revision: 1.4 $
 Last update: $Date: 2003-01-30 09:56:51 $
 ------------

 This file contains:

 - the kernel system variables

 - the errno functions

 - the scheduler, capacity timer, and guarantee

 - the sys_abort, sys_end, sys_gettime functions


**/

/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <stdarg.h>
#include <ll/ll.h>
#include <ll/stdlib.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/config.h>
#include <kernel/model.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/types.h>
#include <kernel/descr.h>
#include <errno.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <kernel/trace.h>

/*----------------------------------------------------------------------*/
/* Kernel System variables                                              */
/*----------------------------------------------------------------------*/

int global_errnumber;   /*+ Errno used in system initialization  +*/
CONTEXT global_context; /*+ Context used during initialization;
                            It also references a safe stack      +*/

int task_counter;       /*+ Application task counter. It represents
                            the number of Application tasks in the
                            system. When all Application Tasks end,
                            the system ends as well.             +*/

int system_counter;     /*+ System task counter. It represents
                            the number of System tasks in the
                            system with the NO_KILL flag reset.
                            When all Application Tasks end,
                            the system waits for the end of the
                            system tasks and then it ends.       +*/

PID exec;               /*+ Task advised by the scheduler        +*/
PID exec_shadow;        /*+ Currently executing task             +*/

IQUEUE freedesc;        /*+ Free descriptors handled as a queue  +*/

DWORD sys_tick;         /*+ System tick (in usec)                +*/
struct timespec schedule_time;
                        /*+ Timer read at each call to schedule()+*/

int   cap_timer;        /*+ the capacity event posted when the
                            task starts                          +*/
struct timespec cap_lasttime;
                        /*+ the time at which the capacity
                            event is posted. Normally, it is
                            equal to schedule_time               +*/



DWORD sched_levels;     /*+ Schedule levels active in the system +*/
DWORD res_levels;       /*+ Resource levels active in the system +*/

/*+ Process descriptor table +*/
proc_des proc_table[MAX_PROC];

/* Scheduling modules descriptor table */
/* ------------------------------------------------------------------------ */

/* the descriptor table */
level_des *level_table[MAX_SCHED_LEVEL];
/* ... and the size of each descriptor */
size_t level_size[MAX_SCHED_LEVEL];

/* a usage counter, incremented when a level is used by another module */
int level_used[MAX_SCHED_LEVEL];
/* these data structures (first, last, free, next & prev)
   are used to implement a doubly linked list of scheduling modules.
   That list is used by the scheduler to call the modules' schedulers. */
int level_first; /* first module in the list */
int level_last;  /* last module in the list */
int level_free;  /* singly linked list of free module descriptors */
int level_next[MAX_SCHED_LEVEL];
int level_prev[MAX_SCHED_LEVEL];
/* ------------------------------------------------------------------------ */

/*+ Resource descriptor table +*/
resource_des *resource_table[MAX_RES_LEVEL];

/*+ This variable is set by the system calls sys_end() and sys_abort().
    When sys_end() or sys_abort() is called from an event handler,
    we don't have to change context in the reschedule();
    see kernel/event.c +*/
int mustexit = 0;

/*+ this is the system runlevel; it may be from 0 to 4:

    1 - running
    2 - shutdown
    3 - before halting
    4 - halting
+*/
int runlevel;

/*+ this variable is set to 1 in call_runlevel_func (see init.c)
    and it is used because task_activate (see activate.c) must
    work in a different way when the system is in the global_context +*/
int calling_runlevel_func;


/*----------------------------------------------------------------------*/
/* Kernel internal functions                                            */
/*----------------------------------------------------------------------*/

/*+ errno handling: this function returns the correct address for errno.
    The address returned can be either the global errno or the errno local
    to the executing task */
static int *__errnumber()
{
  if (exec_shadow == -1)
    return &global_errnumber;
  else
    return &(proc_table[exec_shadow].errnumber);
}
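
/*
   Usage sketch (editor's note, not part of the original source): once
   seterrnumber(__errnumber) is installed in __kernel_init__(), every
   errno access in the kernel goes through this hook, so a statement
   such as

       *__errnumber() = EINVAL;

   stores the error code in proc_table[exec_shadow].errnumber while a
   task is running, and in global_errnumber during system startup
   (exec_shadow == -1).
*/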

/*+ This is the capacity timer. It fires when the running task has exhausted
    the time contained in its avail_time field. The event is typically
    posted in scheduler() after the task_dispatch. The task_dispatch
    can modify the avail_time field for its own scheduling purposes.
    The wcet field is NOT used in the Generic kernel; it is initialized
    to 0 at init time. +*/
void capacity_timer(void *arg)
{
  /* the capacity event is served, so at the epilogue we
     don't have to erase it */
  cap_timer = NIL;

//  kern_printf("cap%d ",exec_shadow);

  /* When we reschedule, the call to task_epilogue checks the slice and
     puts the task at the queue's tail */
  event_need_reschedule();
}
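
/*
   Sketch (editor's note, not part of the original source): a
   round-robin-like module could consume this capacity mechanism in its
   public_epilogue().  The names RR_level_des, slice, ready and RR_READY
   are hypothetical; the sketch only assumes that the generic kernel has
   already subtracted the consumed CPU time from avail_time:

     static void RR_public_epilogue(LEVEL l, PID p)
     {
       RR_level_des *lev = (RR_level_des *)(level_table[l]);

       if (proc_table[p].avail_time <= 0)
         // slice exhausted: recharge it before requeueing the task
         proc_table[p].avail_time += lev->slice;

       iq_insertlast(p, &lev->ready);      // back into the ready queue
       proc_table[p].status = RR_READY;    // module-defined status constant
     }
*/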

/*+
  Generic Scheduler:
  This function selects the next task to be executed.
  The selection is made by calling the level schedulers.
  It assumes that THERE IS a task that can be scheduled in one
  level.

  The general scheduler:
  - first, it checks for interrupts.
  - then, it calls the epilogue of the task pointed to by exec_shadow
  - after that, it calls the level schedulers
  - then it sets exec and it follows the shadow chain
  - finally it calls task_dispatch for the new task (the shadow!!!),
    telling it whether exec != exec_shadow

+*/
void scheduler(void)
{
  LEVEL l;    /* a counter                                     */
  struct timespec ty; /* a dummy used for time computation     */

  PID p;      /* p is the task chosen by the level scheduler   */
  int ok;     /* 1 only if the task chosen by the level scheduler
                 is eligible (normally it is, but for some servers
                 it is not always true (e.g., CBS))            */

  PID old_exec_shadow;

  if ( (exec_shadow != -1 &&
       (proc_table[exec_shadow].control & NO_PREEMPT) ) )
    return;

  //  kern_printf("(!");

  /*
  exec_shadow = exec = -1 only if the scheduler is called from:
   . task_endcycle
   . task_kill
   . task_extract
   . task_sleep
   . task_delay
  and from the system startup routines.

  Normally, the scheduler is called with exec & co. != -1...

  if exec & co. are set to -1 before calling scheduler(), the following
  steps have to be executed before the call:
  - get the schedule_time
  - account the capacity if necessary
  - call an epilogue
  */

  /* then, we call the epilogue. the epilogue typically checks the
     avail_time field... */
  if (exec_shadow != -1) {
    kern_epilogue_macro();

    l = proc_table[exec_shadow].task_level;
    level_table[l]->public_epilogue(l,exec_shadow);
  }

  //  kern_printf("[");

  l = level_first;
  for(;;) {
    do {
      p = level_table[l]->public_scheduler(l);
      //      kern_printf("p=%d",p);
      if (p != NIL)
        ok = level_table[ proc_table[p].task_level ]->
          public_eligible(proc_table[p].task_level,p);
      else
        ok = 0;
      //      kern_printf(" ok=%d",ok);
    } while (ok < 0); /* repeat the level scheduler if the task isn't
                         eligible... (e.g., in the aperiodic servers...) */
    if (p != NIL) break;

    l = level_next[l];  /* THERE MUST BE a level with a task to schedule */
    //    kern_printf(" l=%d",l);
  };

  //  kern_printf("]");

  /* tracer stuff */
  //trc_logevent(exec,TRC_SCHEDULE,NULL,0);

  /* we follow the shadow chain */
  old_exec_shadow=exec_shadow;
  exec_shadow = exec = p;
  while (exec_shadow != proc_table[exec_shadow].shadow)
    exec_shadow = proc_table[exec_shadow].shadow;

  /* tracer stuff */
  //trc_logevent(exec_shadow,TRC_DISPATCH,NULL,0);
  if (old_exec_shadow!=exec_shadow)
    trc_logevent(TRC_SCHEDULE,&exec_shadow);
  //    kern_printf("[%i->%i]",old_exec_shadow,exec_shadow);

  /* we check the correctness of the shadows when we kill */
  proc_table[exec_shadow].status = EXE;

  //  kern_printf("(d%d)",exec_shadow);
  l = proc_table[exec_shadow].task_level;
  level_table[l]->public_dispatch(l, exec_shadow, exec!=exec_shadow);

  //  kern_printf("*");

  /* Finally, we post the capacity event, BUT
     . only if the task requires it
     . only if exec==exec_shadow (if a task is blocked we don't want
       to check the capacity!!!) */
  if ((proc_table[exec_shadow].control & CONTROL_CAP)
      && exec==exec_shadow) {
    TIMESPEC_ASSIGN(&ty, &schedule_time);
    ADDUSEC2TIMESPEC(proc_table[exec_shadow].avail_time,&ty);
    //    kern_printf("³s%d ns%d sched s%d ns%d³",ty.tv_sec,ty.tv_nsec, schedule_time.tv_sec, schedule_time.tv_nsec);
    cap_timer = kern_event_post(&ty, capacity_timer, NULL);
  }
  /* set the time at which the task is scheduled */
  TIMESPEC_ASSIGN(&cap_lasttime, &schedule_time);

  //  kern_printf("(s%d)",exec_shadow);
}
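
/*
   Note (editor's addition, not part of the original source): the shadow
   chain followed above is how blocking is expressed to the scheduler.
   If the level schedulers choose a task A (exec = A) that is blocked on
   a resource owned by task B, the resource module has set
   proc_table[A].shadow = B, so the loop resolves exec_shadow to B and B
   is dispatched in A's place; chains of any length are followed until a
   task whose shadow field points to itself is found.
*/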


/*+
  Guarantee:
  This function guarantees the system: it calls the
  public_guarantee() of each level that has that function != NULL.

  The guarantee is done on a utilization factor basis.
  We maintain only a DWORD: num has to be interpreted as num/MAX_DWORD
  free bandwidth.
+*/
int guarantee()
{
  bandwidth_t num=MAX_BANDWIDTH;
  int l;

  for (l =0; l<MAX_SCHED_LEVEL && level_table[l]->public_guarantee; l++)
    if (!level_table[l]->public_guarantee(l,&num))
      return -1;

  return 0; /* OK */
}
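
/*
   Sketch (editor's note, not part of the original source) of how a
   scheduling module could implement public_guarantee(), assuming a
   hypothetical level descriptor MY_level_des that caches the module
   utilization U, scaled so that MAX_BANDWIDTH means 100% of the CPU:

     static int MY_public_guarantee(LEVEL l, bandwidth_t *freebandwidth)
     {
       MY_level_des *lev = (MY_level_des *)(level_table[l]);

       if (*freebandwidth >= lev->U) {
         *freebandwidth -= lev->U;  // reserve this module's share
         return 1;                  // admission test passed, go on
       }
       else
         return 0;                  // not enough bandwidth: guarantee() returns -1
     }
*/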

/*----------------------------------------------------------------------*/
/* Context switch handling functions                                    */
/*----------------------------------------------------------------------*/
/* this function is called every time a context change occurs,
   when a task is preempted by an event called into an IRQ */
void kern_after_dispatch()
{
  /* every time a task wakes up from an IRQ, it has to check for async
     cancellation */
  check_killed_async();

  /* Then, look for pending signal delivery */
  kern_deliver_pending_signals();
}

/*----------------------------------------------------------------------*/
/* Kernel main system functions                                         */
/*----------------------------------------------------------------------*/

extern int trc_systemevents(trc_event_t *evt, int event, void *ptr);

/*+
  This function initializes:
  - the virtual machine (timer, interrupts, memory)
  - the system's structures (queues, tables), and the two tasks main and
    dummy, which are always present
+*/
void __kernel_init__(/* struct multiboot_info *multiboot */ void)
{
  int i,j;                                              /* counters */

  struct ll_initparms parms;                          /* for the VM */

//  extern void C8042_restore(void);              /* an exit function */
  int aborting;          /* it is set if we are aborting the system */

  struct multiboot_info *multiboot=mbi_address();



  /*
   * Runlevel 0: kernel startup
   *
   *
   */

  runlevel = RUNLEVEL_STARTUP;

  /* The kernel startup MUST proceed with interrupts disabled!    */
  kern_cli();

  /* First we initialize the memory allocator, because it is needed by
     __kernel_register_levels__     */
  kern_mem_init(multiboot);

  /* Clear the task descriptors */
  for (i = 0; i < MAX_PROC; i++) {
     proc_table[i].task_level   = -1;
     proc_table[i].stack        = NULL;
     proc_table[i].name[0]      = 0;
     proc_table[i].status       = FREE;
     proc_table[i].pclass       = 0;
     proc_table[i].group        = 0;
     proc_table[i].stacksize    = 0;
     proc_table[i].control      = 0;
     proc_table[i].frozen_activations = 0;
     proc_table[i].sigmask      = 0;
     proc_table[i].sigpending   = 0;
     proc_table[i].avail_time   = 0;
     proc_table[i].shadow       = i;
     proc_table[i].cleanup_stack= NULL;
     proc_table[i].errnumber    = 0;
     //proc_table[i].priority     = 0;
     //NULL_TIMESPEC(&proc_table[i].timespec_priority);
     proc_table[i].delay_timer  = -1;
     proc_table[i].wcet         = -1;

     proc_table[i].jet_tvalid   = 0;
     proc_table[i].jet_curr     = 0;
     proc_table[i].jet_max      = 0;
     proc_table[i].jet_sum      = 0;
     proc_table[i].jet_n        = 0;
     for (j=0; j<JET_TABLE_DIM; j++)
        proc_table[i].jet_table[j] = 0;

     proc_table[i].waiting_for_me = NIL;
     proc_table[i].return_value   = NULL;

     for (j=0; j<PTHREAD_KEYS_MAX; j++)
       proc_table[i].keys[j] = NULL;
  }

  /* set up the free descriptor queue */
  //  for (i = 0; i < MAX_PROC-1; i++) proc_table[i].next = i+1;
  //  proc_table[MAX_PROC-1].next = NIL;
  //  for (i = MAX_PROC-1; i > 0; i--) proc_table[i].prev = i-1;
  //  proc_table[0].prev = NIL;
  //  freedesc = 0;
  iq_init(&freedesc, NULL, 0);
  for (i = 0; i < MAX_PROC; i++)
    iq_insertlast(i,&freedesc);

  /* Set up the various stuff */
  global_errnumber = 0;
  task_counter     = 0;
  system_counter   = 0;
  exec             = -1;
  exec_shadow      = -1;
  cap_timer        = -1;
  NULL_TIMESPEC(&cap_lasttime);
  sched_levels     = 0;  /* They are not registered yet... */
  res_levels       = 0;
  calling_runlevel_func = 0;

  /* Clear the key-specific data */
  task_specific_data_init();

  /* Clear exit and init functions */
  runlevel_init();

  /* Init VM layer (Interrupts, levels & memory management)           */
  /* for old exception handling, use excirq_init() */
  signals_init();
  set_default_exception_handler();

  /* Clear scheduling modules registration data */
  levels_init();

  sys_tick = __kernel_register_levels__(multiboot);

  /* tracer stuff */
  /*
  trc_register_eventclass(TRC_CLASS_SYSTEM,
                          TRC_SYSTEMNUMBER,
                          trc_systemevents);
  */

  /* test on system tick */
  if (sys_tick>=55000)  {
     printk("The system tick must be less than 55 mSec!");
     l1_exit(0);
  }

  /* OSLib initialization */
  if (sys_tick)
    parms.mode = LL_PERIODIC;
  else
    parms.mode = LL_ONESHOT; // one shot!!!

  parms.tick = sys_tick;

  /*
   * Runlevel INIT: Let's go!!!!
   *
   *
   */

  runlevel = RUNLEVEL_INIT;

  ll_init();
  event_init(&parms);
  seterrnumber(__errnumber);
  event_setprologue(event_resetepilogue);
  event_setlasthandler(kern_after_dispatch);

  /* call the init functions */
  call_runlevel_func(RUNLEVEL_INIT, 0);




  /*
   * Runlevel RUNNING: Hoping that all works fine ;-)
   *
   *
   */

  runlevel = RUNLEVEL_RUNNING;

  /* reset keyboard after exit */
//  sys_atexit((void(*)(void *))C8042_restore,NULL,AFTER_EXIT);

  /* tracer stuff */
  trc_resume();

  /* exec and exec_shadow are already = -1 */
  kern_gettime(&schedule_time);
  scheduler();
  global_context = ll_context_from(); /* It will be used by sys_end */
  ll_context_to(proc_table[exec_shadow].context);

  /*
   *
   * Now the system starts!!!
   * (hoping that someone has created some task(s) )
   * The function returns only at system end...
   *
   */


  /*
   * Runlevel SHUTDOWN: Shutting down the system... :-(
   *
   *
   */

  event_setlasthandler(NULL);

  // ll_abort(666);
  /* tracer stuff */
  trc_suspend();

  runlevel = RUNLEVEL_SHUTDOWN;

  /* 1 when the error code is != 0 */
  aborting = global_errnumber > 0;

  //kern_printf("after  - system_counter=%d, task_counter = %d\n", system_counter,task_counter);

  call_runlevel_func(RUNLEVEL_SHUTDOWN, aborting);

  //kern_printf("before - system_counter=%d, task_counter = %d\n", system_counter,task_counter);

  if (system_counter) {
    /* To shut down the kernel correctly, we have to wait until all the
       killable SYSTEM tasks have died...

       We don't mess with the user tasks: we only kill them and reschedule.
       The only important thing is that the system tasks shut down correctly.
       We do nothing for user tasks that remain active (because, for example,
       they have cancelability set to deferred) when the system goes to
       runlevel 3 */
    //kern_printf("Û%lu",kern_gettime(NULL));
    kill_user_tasks();
    //kern_printf("Û%lu",kern_gettime(NULL));

    /* we have to go back to multitasking mode!!! */
    mustexit = 0;

    /* exec and exec_shadow are already = -1 */
    kern_gettime(&schedule_time);
    global_context = ll_context_from(); /* It will be used by sys_end */
    scheduler();

    event_setlasthandler(kern_after_dispatch);
    ll_context_to(proc_table[exec_shadow].context);
    event_setlasthandler(NULL);
  }




  /*
   * Runlevel BEFORE_EXIT: Before halting the system
   *
   *
   */

  runlevel = RUNLEVEL_BEFORE_EXIT;


  /* the field global_errnumber is
     =0  if the system ends normally
     !=0 if an abort is issued
  */

  //kern_printf("Calling exit functions\n");

  call_runlevel_func(RUNLEVEL_BEFORE_EXIT, aborting);

  //kern_printf("After exit functions\n");

  /* Shut down the VM layer */
  ll_end();


  /*
   * Runlevel AFTER_EXIT: After halting...
   *
   *
   */

  runlevel = RUNLEVEL_AFTER_EXIT;

  //kern_printf("Before the after-exit functions\n");

  call_runlevel_func(RUNLEVEL_AFTER_EXIT, 0);

  //kern_printf("After the after-exit functions\n");
  kern_cli();
  if (global_errnumber) {
    /* vm_abort called */
    kern_printf("Abort detected\nCode : %u\n",global_errnumber);
    l1_exit(-1);
  }

  l1_exit(0); // System terminated normally

}

/* IMPORTANT!!!
   I'm almost sure the shutdown procedure does not work inside interrupts. */
void internal_sys_end(int i)
{
  LEVEL l;    /* a counter                                     */

  /* if something goes wrong during real mode */
  if (runlevel==RUNLEVEL_STARTUP || runlevel==RUNLEVEL_AFTER_EXIT)
    l1_exit(i);

  //kern_printf("mustexit=%d",mustexit);
  if (mustexit)
    return;

  mustexit = 1;

  global_errnumber = i;


  if (!ll_ActiveInt()) {
    proc_table[exec_shadow].context = kern_context_save();

    if (exec_shadow != -1) {
      kern_gettime(&schedule_time);

      kern_epilogue_macro();

      /* then, we call the epilogue. the epilogue typically checks the
         avail_time field... */
      l = proc_table[exec_shadow].task_level;
      level_table[l]->public_epilogue(l,exec_shadow);

      exec_shadow = exec = -1;
    }
    kern_context_load(global_context);
  }

  if (ll_ActiveInt()) {
    ll_context_to(global_context);
    /* The context change will be done when all the interrupts end!!! */
  }

  //kern_printf("end sysend");

  /* control reaches this line only if sys_end() is called from an event
     handler (for example, if the event raises an exception with
     SA_USEFAST active and the exception calls sys_end() ) */
}


/*
   Close the system & return to the HOST OS.
   Can be called from tasks and from ISRs.


*/
void sys_abort(int err)
{
  SYS_FLAGS f;

  f = kern_fsave();
  internal_sys_end(err);
  kern_frestore(f);
}

void sys_end(void)
{
  sys_abort(0);
}

void _exit(int status)
{
  sys_abort(status);
}



/* this function is never called... it is used by the OSLib */
void sys_abort_tail(int code)
{
 //DUMMY!!!!
}



/*+ this primitive returns the time read from the system timer +*/
TIME sys_gettime(struct timespec *t)
{
  SYS_FLAGS f;
  TIME x;

  f = kern_fsave();
  x = kern_gettime(t);
  kern_frestore(f);

  return x;
}