/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors     :
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: kern.c,v 1.2 2002-11-11 08:34:08 pj Exp $

 File:        $File$
 Revision:    $Revision: 1.2 $
 Last update: $Date: 2002-11-11 08:34:08 $
 ------------

 This file contains:

 - the kernel system variables

 - the errno functions

 - the scheduler, capacity timer, and guarantee

 - the sys_abort, sys_end, sys_gettime


**/
 
/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <stdarg.h>
#include <ll/ll.h>
#include <ll/stdlib.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/config.h>
#include <kernel/model.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/types.h>
#include <kernel/descr.h>
#include <errno.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <kernel/trace.h>
 
/*----------------------------------------------------------------------*/
/* Kernel System variables                                              */
/*----------------------------------------------------------------------*/

int global_errnumber;   /*+ Errno used in system initialization  +*/
CONTEXT global_context; /*+ Context used during initialization;
                            it also references a safe stack      +*/

int task_counter;       /*+ Application task counter. It represents
                            the number of Application tasks in the
                            system. When all Application tasks end,
                            the system ends as well.             +*/

int system_counter;     /*+ System task counter. It represents
                            the number of System tasks in the
                            system with the NO_KILL flag reset.
                            When all Application tasks end,
                            the system waits for the end of the
                            system tasks and then it ends.       +*/

PID exec;               /*+ Task advised by the scheduler        +*/
PID exec_shadow;        /*+ Currently executing task             +*/

IQUEUE freedesc;        /*+ Free descriptors handled as a queue  +*/

DWORD sys_tick;         /*+ System tick (in usec)                +*/
struct timespec schedule_time;
                        /*+ Timer read at each call to schedule()+*/

int   cap_timer;        /*+ the capacity event posted when the
                            task starts                          +*/
struct timespec cap_lasttime;
                        /*+ the time at which the capacity
                            event is posted. Normally, it is
                            equal to schedule_time               +*/



DWORD sched_levels;     /*+ Schedule levels active in the system +*/
DWORD res_levels;       /*+ Resource levels active in the system +*/

/*+ Process descriptor table +*/
proc_des proc_table[MAX_PROC];

/*+ Level descriptor table +*/
level_des *level_table[MAX_SCHED_LEVEL];

/*+ Resource descriptor table +*/
resource_des *resource_table[MAX_RES_LEVEL];

/*+ This variable is set by the system call sys_end() or sys_abort().
    When sys_end() or sys_abort() is called inside an event handler,
    we don't have to change context in the reschedule();
    see kernel/event.c +*/
int mustexit = 0;

/*+ this is the system runlevel... it may be from 0 to 4:

    0 - kernel startup
    1 - running
    2 - shutdown
    3 - before halting
    4 - halting
+*/
int runlevel;

/*+ this variable is set to 1 inside call_runlevel_func (see init.c)
    and it is used because task_activate (see activate.c) must
    work in a different way when the system is in the global_context +*/
int calling_runlevel_func;
 
/*----------------------------------------------------------------------*/
/* Kernel internal functions                                            */
/*----------------------------------------------------------------------*/

/*+ errno handling: this function returns the correct address for errno.
    The address returned can be either the global errno or the errno local
    to the executing task +*/
static int *__errnumber()
{
  if (exec_shadow == -1)
    return &global_errnumber;
  else
    return &(proc_table[exec_shadow].errnumber);
}
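
/*+ Editor's sketch (not part of the original sources): __errnumber() is
    installed later in this file through seterrnumber(__errnumber), so that
    an errno access performed by library code resolves to a per-task slot
    once a task is running.  Assuming the C library expands errno roughly
    as below (the macro shown is illustrative, not taken from the headers):

      #define errno (*__errnumber())

      errno = 0;    // before the first task runs -> global_errnumber
                    // inside a running task      -> proc_table[exec_shadow].errnumber
+*/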
 
/*+ this is the capacity timer. It fires when the running task has exhausted
    the time contained in the avail_time field. The event is typically
    posted in the scheduler() after the task_dispatch. The task_dispatch
    can modify the avail_time field for its own scheduling purposes.
    The wcet field is NOT used in the Generic kernel; it is initialized
    to 0 at init time. +*/
void capacity_timer(void *arg)
{
  /* the capacity event is served, so at the epilogue we
     don't have to erase it */
  cap_timer = NIL;

//  kern_printf("cap%d ",exec_shadow);

  /* When we reschedule, the call to task_epilogue checks the slice and
     puts the task at the queue's tail */
  event_need_reschedule();
}
 
/*+
  Generic Scheduler:
  This function selects the next task that should be executed.
  The selection is made by calling the level schedulers.
  It assumes that THERE IS a task that can be scheduled in one
  level.

  The general scheduler:
  - first, it checks for interrupts.
  - then, it calls the epilogue of the task pointed to by exec_shadow
  - after that, it calls the level schedulers
  - then it sets exec and it follows the shadow chain
  - finally it calls task_dispatch for the new task (the shadow!!!),
    telling it whether exec != exec_shadow

+*/
void scheduler(void)
{
  LEVEL l;    /* a counter                                     */
  TIME tx;    /* a dummy used for time computation             */
  struct timespec ty; /* a dummy used for time computation     */

  PID p;      /* p is the task chosen by the level scheduler   */
  int ok;     /* 1 only if the task chosen by the level scheduler
                 is eligible (normally, it is; but in some servers
                 it is not always true (e.g., CBS))            */

  PID old_exec_shadow;

  if ( (exec_shadow != -1 &&
       (proc_table[exec_shadow].control & NO_PREEMPT) ) )
    return;

  /*
  exec_shadow = exec = -1 only if the scheduler is called from:
   . task_endcycle
   . task_kill
   . task_extract
   . task_sleep
   . task_delay
  and from the system startup routines.

  Normally, the scheduler is called with exec & co. != -1...

  if exec & co. are set to -1 before calling scheduler(), the following
  steps have to be executed before the call:
  - get the schedule_time
  - account the capacity if necessary
  - call an epilogue
  */

  if (exec_shadow != -1) {
    // ok is set for debug :-(
    ok = ll_gettime(TIME_EXACT, &schedule_time);
//    kern_printf("(%d sched s%d ns%d)", ok, schedule_time.tv_sec, schedule_time.tv_nsec);

    /* manage the capacity event */
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    proc_table[exec_shadow].avail_time -= tx;
    jet_update_slice(tx);

    /* if the event hasn't fired yet, we delete it. */
    if (cap_timer != NIL) {
      event_delete(cap_timer);
      cap_timer = NIL;
    }

    /* then, we call the epilogue. The epilogue typically checks the
       avail_time field... */

//  kern_printf("(e%d)",exec_shadow);

    l = proc_table[exec_shadow].task_level;
    level_table[l]->task_epilogue(l,exec_shadow);
  }
 
  l = 0;
  for(;;) {
    do {
      p = level_table[l]->level_scheduler(l);
      if (p != NIL)
        ok = level_table[ proc_table[p].task_level ]->
          task_eligible(proc_table[p].task_level,p);
      else
        ok = 0;
    } while (ok < 0); /* repeat the level scheduler if the task isn't
                         eligible... (e.g., in the aperiodic servers...) */
    if (p != NIL) break;

    l++;            /* THERE MUST BE a level with a task to schedule */
  };

  /* tracer stuff */
  //trc_logevent(exec,TRC_SCHEDULE,NULL,0);

  /* we follow the shadow chain */
  old_exec_shadow=exec_shadow;
  exec_shadow = exec = p;
  while (exec_shadow != proc_table[exec_shadow].shadow)
    exec_shadow = proc_table[exec_shadow].shadow;

  /* tracer stuff */
  //trc_logevent(exec_shadow,TRC_DISPATCH,NULL,0);
  if (old_exec_shadow!=exec_shadow)
    trc_logevent(TRC_SCHEDULE,&exec_shadow);
  //  kern_printf("[%i->%i]",old_exec_shadow,exec_shadow);

  /* we check the correctness of the shadows when we kill */
  proc_table[exec_shadow].status = EXE;

  //kern_printf("(d%d)",exec_shadow);
  l = proc_table[exec_shadow].task_level;
  level_table[l]->task_dispatch(l, exec_shadow, exec!=exec_shadow);

  /* Finally, we post the capacity event, BUT
     . only if the task requires it
     . only if exec==exec_shadow (if a task is blocked we don't want
       to check the capacity!!!) */
  if ((proc_table[exec_shadow].control & CONTROL_CAP)
      && exec==exec_shadow) {
    TIMESPEC_ASSIGN(&ty, &schedule_time);
    ADDUSEC2TIMESPEC(proc_table[exec_shadow].avail_time,&ty);
//    kern_printf("|s%d ns%d sched s%d ns%d|",ty.tv_sec,ty.tv_nsec, schedule_time.tv_sec, schedule_time.tv_nsec);
    cap_timer = kern_event_post(&ty, capacity_timer, NULL);
  }
  /* set the time at which the task is scheduled */
  TIMESPEC_ASSIGN(&cap_lasttime, &schedule_time);

  //if (runlevel != 1) kern_printf("(s%d)",exec_shadow);
}
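
/*+ Editor's sketch (not part of the original sources): a worked example of
    the capacity accounting performed above, with purely illustrative
    numbers.  Suppose the dispatched task has CONTROL_CAP set and
    avail_time = 2000 usec when the scheduler runs at time t0:

      - the capacity event is posted at t0 + 2000 usec;
      - if the task runs undisturbed until then, capacity_timer() fires,
        clears cap_timer and calls event_need_reschedule();
      - if instead the scheduler runs again after 1500 usec (e.g. because
        of a preemption), the pending event is deleted, avail_time drops
        to 500 usec, and a new event is posted 500 usec after the new
        schedule_time.
+*/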
 
/*+
  Guarantee:
  This function guarantees the system: it calls the
  level_guarantee of each level that has that function != NULL

  The guarantee is done on a utilization-factor basis.
  We maintain only a DWORD; num has to be interpreted as num/MAX_DWORD
  of free bandwidth.
+*/
int guarantee()
{
  bandwidth_t num=MAX_BANDWIDTH;
  int l;

  for (l =0; l<MAX_SCHED_LEVEL && level_table[l]->level_guarantee; l++)
    if (!level_table[l]->level_guarantee(l,&num))
      return -1;

  return 0; /* OK */
}
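
/*+ Editor's sketch (not part of the original sources): what a level's
    level_guarantee() hook is expected to do with num, following the
    convention described above (num/MAX_DWORD is the free bandwidth).
    The level name and its U field are hypothetical:

      static int example_level_guarantee(LEVEL l, bandwidth_t *freebandwidth)
      {
        example_level_des *lev = (example_level_des *)level_table[l];

        if (*freebandwidth >= lev->U) {  // enough spare bandwidth?
          *freebandwidth -= lev->U;      // reserve it for this level
          return 1;                      // guarantee succeeded
        }
        else
          return 0;                      // failure -> guarantee() returns -1
      }
+*/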
 
/*----------------------------------------------------------------------*/
/* Context switch handling functions                                    */
/*----------------------------------------------------------------------*/
/* this function is called every time a context change occurs,
   when a task is preempted by an event handler called inside an IRQ */
void kern_after_dispatch()
{
  /* every time a task wakes up from an IRQ, it has to check for async
     cancellation */
  check_killed_async();

  /* Then, look for pending signal delivery */
  kern_deliver_pending_signals();
}

/*----------------------------------------------------------------------*/
/* Kernel main system functions                                         */
/*----------------------------------------------------------------------*/

extern int trc_systemevents(trc_event_t *evt, int event, void *ptr);

/*+
  This function initializes:
  - the virtual machine (timer, interrupts, mem)
  - the system's structures (queues, tables), and the two tasks, main
    and dummy, that are always present
+*/
void __kernel_init__(struct multiboot_info *multiboot)
{
  int i,j;                                              /* counters */

  struct ll_initparms parms;                          /* for the VM */

//  extern void C8042_restore(void);              /* an exit function */
  int aborting;          /* it is set if we are aborting the system */




  /*
   * Runlevel 0: kernel startup
   *
   *
   */

  runlevel = 0;

  /* The kernel startup MUST proceed with interrupts disabled!    */
  kern_cli();

  /* First we initialize the memory allocator, because it is needed by
     __kernel_register_levels__     */
  kern_mem_init(multiboot);

  /* Clear the task descriptors */
  for (i = 0; i < MAX_PROC; i++) {
     proc_table[i].task_level   = -1;
     proc_table[i].stack        = NULL;
     proc_table[i].name[0]      = 0;
     proc_table[i].status       = FREE;
     proc_table[i].pclass       = 0;
     proc_table[i].group        = 0;
     proc_table[i].stacksize    = 0;
     proc_table[i].control      = 0;
     proc_table[i].frozen_activations = 0;
     proc_table[i].sigmask      = 0;
     proc_table[i].sigpending   = 0;
     NULL_TIMESPEC(&proc_table[i].request_time);
     proc_table[i].avail_time   = 0;
     proc_table[i].shadow       = i;
     proc_table[i].cleanup_stack= NULL;
     proc_table[i].errnumber    = 0;
     //proc_table[i].priority     = 0;
     //NULL_TIMESPEC(&proc_table[i].timespec_priority);
     proc_table[i].delay_timer  = -1;
     proc_table[i].wcet         = -1;

     proc_table[i].jet_tvalid   = 0;
     proc_table[i].jet_curr     = 0;
     proc_table[i].jet_max      = 0;
     proc_table[i].jet_sum      = 0;
     proc_table[i].jet_n        = 0;
     for (j=0; j<JET_TABLE_DIM; j++)
        proc_table[i].jet_table[j] = 0;

     proc_table[i].waiting_for_me = NIL;
     proc_table[i].return_value   = NULL;

     for (j=0; j<PTHREAD_KEYS_MAX; j++)
       proc_table[i].keys[j] = NULL;
  }
 
  /* set up the free descriptor queue */
  //  for (i = 0; i < MAX_PROC-1; i++) proc_table[i].next = i+1;
  //  proc_table[MAX_PROC-1].next = NIL;
  //  for (i = MAX_PROC-1; i > 0; i--) proc_table[i].prev = i-1;
  //  proc_table[0].prev = NIL;
  //  freedesc = 0;
  iq_init(&freedesc, NULL, 0);
  for (i = 0; i < MAX_PROC; i++)
    iq_insertlast(i,&freedesc);

  /* Set up the various stuff */
  global_errnumber = 0;
  task_counter     = 0;
  system_counter   = 0;
  exec             = -1;
  exec_shadow      = -1;
  cap_timer        = -1;
  NULL_TIMESPEC(&cap_lasttime);
  sched_levels     = 0;  /* They are not registered yet... */
  res_levels       = 0;
  calling_runlevel_func = 0;

  /* Clear the key-specific data */
  task_specific_data_init();

  /* Clear exit and init functions */
  runlevel_init();

  /* Init VM layer (Interrupts, levels & memory management)           */
  /* for old exception handling, use excirq_init() */
  signals_init();

  sys_tick = __kernel_register_levels__(multiboot);

  /* tracer stuff */
  /*
  trc_register_eventclass(TRC_CLASS_SYSTEM,
                          TRC_SYSTEMNUMBER,
                          trc_systemevents);
  */

  /* test on the system tick */
  if (sys_tick>=55000)  {
     printk("The system tick must be less than 55 mSec!");
     l1_exit(0);
  }
 
  /* OSLib initialization */
  if (sys_tick)
    parms.mode = LL_PERIODIC;
  else
    parms.mode = LL_ONESHOT; // one shot!!!

  parms.tick = sys_tick;

  /*
   * Runlevel 1: Let's go!!!!
   *
   *
   */

  runlevel = RUNLEVEL_INIT;

  ll_init();
  event_init(&parms);
  seterrnumber(__errnumber);
  event_setprologue(event_resetepilogue);
  event_setlasthandler(kern_after_dispatch);

  /* call the init functions */
  call_runlevel_func(RUNLEVEL_INIT, 0);

  /* reset keyboard after exit */
//  sys_atexit((void(*)(void *))C8042_restore,NULL,AFTER_EXIT);

  /* tracer stuff */
  trc_resume();

  /* exec and exec_shadow are already = -1 */
  ll_gettime(TIME_EXACT, &schedule_time);
  scheduler();
  global_context = ll_context_from(); /* It will be used by sys_end */
  ll_context_to(proc_table[exec_shadow].context);

  /*
   *
   * Now the system starts!!!
   * (hoping that someone has created some task(s) )
   * The function returns only at system end...
   *
   */
 

  /*
   * Runlevel 2: Shutting down the system... :-(
   *
   *
   */

  event_setlasthandler(NULL);

  // ll_abort(666);
  /* tracer stuff */
  trc_suspend();

  runlevel = RUNLEVEL_SHUTDOWN;

  /* 1 when the error code is != 0 */
  aborting = global_errnumber > 0;

  //kern_printf("after  - system_counter=%d, task_counter = %d\n",
  //         system_counter,task_counter);

  call_runlevel_func(RUNLEVEL_SHUTDOWN, aborting);

  //kern_printf("before - system_counter=%d, task_counter = %d\n",
  //          system_counter,task_counter);

  if (system_counter) {
    /* To shut down the kernel correctly, we have to wait until all the
       killable SYSTEM tasks have died...

       We don't mess with the user tasks... we only kill them and reschedule.
       The only important thing is that the system tasks shut down correctly.
       We do nothing for user tasks that remain active (because, for example,
       they have their cancelability set to deferred) when the system goes to
       runlevel 3 */
    //kern_printf("Û%lu",ll_gettime(TIME_EXACT,NULL));
    kill_user_tasks();
    //kern_printf("Û%lu",ll_gettime(TIME_EXACT,NULL));

    /* we have to go back to multitasking mode!!! */
    mustexit = 0;

    /* exec and exec_shadow are already = -1 */
    ll_gettime(TIME_EXACT, &schedule_time);
    global_context = ll_context_from(); /* It will be used by sys_end */
    scheduler();

    event_setlasthandler(kern_after_dispatch);
    ll_context_to(proc_table[exec_shadow].context);
    event_setlasthandler(NULL);
  }
 

  /*
   * Runlevel 3: Before Halting the system
   *
   *
   */

  runlevel = RUNLEVEL_BEFORE_EXIT;

  /* the field global_errnumber is
     =0  if the system ends normally
     !=0 if an abort was issued
  */

  //kern_printf("Calling exit Functions\n");

  call_runlevel_func(RUNLEVEL_BEFORE_EXIT, aborting);

  //kern_printf("After exit Functions\n");

  /* Shut down the VM layer */
  ll_end();


  /*
   * Runlevel 4: After halting...
   *
   *
   */

  runlevel = RUNLEVEL_AFTER_EXIT;

  //kern_printf("Before the before Functions\n");

  call_runlevel_func(RUNLEVEL_AFTER_EXIT, 0);

  //kern_printf("After the before Functions\n");
  kern_cli();
  if (global_errnumber) {
    /* vm_abort called */
    kern_printf("Abort detected\nCode : %u\n",global_errnumber);
    l1_exit(-1);
  }

  l1_exit(0); // System terminated normally

}
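
/*+ Editor's sketch (not part of the original sources):
    __kernel_register_levels__() is supplied by the application's
    initialization file, not by this file.  All that kern.c relies on is
    that it registers the scheduling/resource modules and returns the
    system tick in microseconds (0 selects one-shot mode, and a periodic
    tick must stay below 55000 usec).  The prototype and module choices
    below are assumptions, shown only to illustrate the contract:

      TIME __kernel_register_levels__(void *arg)
      {
        // register the scheduling levels and resource modules here,
        // e.g. a dummy level plus the application's scheduler of choice

        return 1000;   // 1000 usec periodic tick (illustrative value)
      }
+*/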
 
void internal_sys_end(int i)
{
  LEVEL l;    /* a counter                                     */
  TIME tx;    /* a dummy used for time computation             */
  struct timespec ty; /* a dummy used for time computation     */

  //kern_printf("mustexit=%d",mustexit);
  if (!mustexit) {
    if (!ll_ActiveInt())
      proc_table[exec_shadow].context = kern_context_save();

    global_errnumber = i;

    mustexit = 1;

    if (exec_shadow != -1) {
      ll_gettime(TIME_EXACT, &schedule_time);
      /* manage the capacity event */
      SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
      tx = TIMESPEC2USEC(&ty);
      proc_table[exec_shadow].avail_time -= tx;
      jet_update_slice(tx);
      /* if the event hasn't fired yet, we delete it. */
      if (cap_timer != NIL) {
        event_delete(cap_timer);
        cap_timer = NIL;
      }

      /* then, we call the epilogue. The epilogue typically checks the
         avail_time field... */
      l = proc_table[exec_shadow].task_level;
      level_table[l]->task_epilogue(l,exec_shadow);

      exec_shadow = exec = -1;
    }

    if (ll_ActiveInt())
      ll_context_to(global_context);
    else
      kern_context_load(global_context);
  }
  //kern_printf("end of sysend");

  /* control reaches this line only if we call sys_end() inside an event
     handler (for example, if the event raises an exception with
     SA_USEFAST active and the exception calls sys_end() ) */
}
 
671
 
672
/*+ Close the system & return to HOST OS.
673
    Can be called from all the tasks...
674
    The first time it is called it jumps to the global context
675
    The second time it jumps only if there are no system task remaining
676
    The error code passed is 0... (it is saved on the first call!!!) +*/
677
void sys_end(void)
678
{
679
  SYS_FLAGS f;
680
 
681
  /* the sys_end change the context to the global context.
682
     when the first time is called, it simply kills all the users tasks
683
     and waits the system tasks to end... */
684
 
685
  /*kern_printf("°sys_end %d°",exec_shadow);*/
686
  /*return;*/
687
  f = kern_fsave();
688
  if (runlevel != RUNLEVEL_INIT && system_counter) {
689
    kern_frestore(f);
690
    return;
691
  }
692
 
693
  internal_sys_end(0);
694
  kern_frestore(f);
695
}
696
 
697
/*+ Close the system & return to HOST OS.
698
    Can be called from all the tasks...
699
    The first time it is called it works as the sys_end
700
    The second time it jumps every time
701
    The error code passed is 0... +*/
702
void sys_abort(int err)
703
{
704
  /* the sys_end change the context to the global context.
705
     when the first time is called, it simply kills all the users tasks
706
     and waits the system tasks to end... */
707
 
708
  internal_sys_end(err);
709
}
710
 
/*+ equivalent to sys_end! +*/
void _exit(int status)
{
  SYS_FLAGS f;

  /* sys_end changes the context to the global context.
     The first time it is called, it simply kills all the user tasks
     and waits for the system tasks to end... */

  /*kern_printf("°sys_end %d°",exec_shadow);*/
  /*return;*/
  f = kern_fsave();
  if (runlevel != RUNLEVEL_INIT && system_counter) {
    kern_frestore(f);
    return;
  }

  internal_sys_end(status);
  kern_frestore(f);
}



/* this function is never called... it is needed by the OSLib */
void sys_abort_tail(int code)
{
 //DUMMY!!!!
}



/*+ this primitive returns the time read from the system timer +*/
TIME sys_gettime(struct timespec *t)
{
  SYS_FLAGS f;
  TIME x;

  f = kern_fsave();
  x = ll_gettime(TIME_EXACT,t);
  kern_frestore(f);

  return x;
}
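
/*+ Editor's sketch (not part of the original sources): typical use of
    sys_gettime() from a task, measuring an elapsed interval with the same
    timespec macros already used in this file:

      struct timespec t1, t2, dt;

      sys_gettime(&t1);
      // ... code to be measured ...
      sys_gettime(&t2);
      SUBTIMESPEC(&t2, &t1, &dt);   // dt = t2 - t1
      // TIMESPEC2USEC(&dt) then gives the elapsed time in microseconds
+*/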
755