/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors     :
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: kill.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $

 File:        $File$
 Revision:    $Revision: 1.1.1.1 $
 Last update: $Date: 2002-03-29 14:12:52 $
 ------------

 This file contains:

 - the functions that kill a task (task_kill, group_kill)
 - the function that frees a task descriptor (task_makefree)
 - the task_abort primitive

**/

/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <stdarg.h>
#include <ll/ll.h>
#include <ll/stdlib.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/config.h>
#include <kernel/model.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/types.h>
#include <kernel/descr.h>
#include <errno.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <kernel/trace.h>

/*+ this structure contains the functions to be called to test if a
    task is blocked on a cancellation point +*/
static struct {
  int (*test)(PID p, void *arg);
  void *arg;
} cancellation_table[MAX_CANCPOINTS];

static int cancellation_points = 0;

void check_killed_async(void)
{
    if (proc_table[exec_shadow].control & KILL_ENABLED     &&
        !(proc_table[exec_shadow].control & KILL_DEFERRED) &&
        proc_table[exec_shadow].control & KILL_REQUEST        ) {
      task_makefree(TASK_CANCELED);

      ll_context_to(proc_table[exec_shadow].context);
      // never returns!!!
    }
}
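
/* Illustrative sketch (not part of the original file): a kernel primitive
   is expected to call check_killed_async() with interrupts disabled, at a
   point where an asynchronous kill request can safely be honoured.
   example_primitive() is a hypothetical name used only for this sketch. */
#if 0
void example_primitive(void)
{
  kern_cli();

  /* ... body of the primitive ... */

  /* if the running task has a pending kill request and its cancellation
     is enabled and asynchronous, this call frees the task and never
     returns; otherwise it is a no-op */
  check_killed_async();

  kern_sti();
}
#endif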

/*+ This function registers a cancellation point into the system.
    Be careful!!! no checks are performed... +*/
void register_cancellation_point(int (*func)(PID p, void *arg), void *arg)
{
  cancellation_table[cancellation_points].test = func;
  cancellation_table[cancellation_points].arg = arg;
  cancellation_points++;
}
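
/* Illustrative sketch (not part of the original file): how a blocking
   primitive might use register_cancellation_point(). The test callback
   must return nonzero when task p is blocked on that primitive, so that
   task_kill()/group_kill() can recognise the blocking as a cancellation
   point. All the example_* names below are hypothetical. */
#if 0
/* PID of the task currently blocked on the example primitive, NIL if none */
static PID example_blocked_task = NIL;

static int example_cancpoint_test(PID p, void *arg)
{
  /* arg is the value passed at registration time (unused here) */
  return p == example_blocked_task;
}

static void example_module_init(void)
{
  /* typically called once, at module registration time */
  register_cancellation_point(example_cancpoint_test, NULL);
}
#endif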


/*+
   This function frees the current task descriptor.
   It deletes the context and calls the task_end and
   res_detach functions.
+*/
void task_makefree(void *ret)
{
    RLEVEL l;        /* used for managing the res_detach... */
    LEVEL lev;       /* for readability */
    PID p;           /* used for controlling shadows */
    PID i;           /* used for readability instead of exec_shadow */

    i = exec_shadow;

    /* first, check if the task is joinable and whether someone is waiting
       for it. This check has to be done before the shadow check (see below)
       because task_join uses the shadow field!!! */
    if (proc_table[i].control & TASK_JOINABLE) {
      proc_table[i].return_value = ret;
      proc_table[i].control |= WAIT_FOR_JOIN;
      if (proc_table[i].waiting_for_me != NIL) {
        /* someone is waiting for me... wake him up!!!
           Note that if the woken-up task is killed by someone, the current
           task remains undetached; this is correct, as specified in 16.2.3 */
        register PID x;
        LEVEL l;

        x = proc_table[i].waiting_for_me;
        l = proc_table[x].task_level;
        level_table[l]->task_insert(l,x);

        proc_table[x].shadow = x;
      }
    }

/*  {
   int xxx;
   kern_printf("(makefree ");
   for (xxx = 0; xxx<7; xxx++) kern_printf("p%d s%d|",xxx, proc_table[xxx].shadow);
   kern_printf(")\n");
  }*/

    /* check whether some other task has its shadow pointing to
       the task being canceled... */
    for (p = 0; p<MAX_PROC; p++)
      if (p != i && proc_table[p].shadow == i) {
        kern_raise(XUNVALID_KILL_SHADOW,i);
        return;
      }

    /* call the cleanup functions */
    while (proc_table[i].cleanup_stack) {
      kern_sti();
      proc_table[i].cleanup_stack->f(proc_table[i].cleanup_stack->a);
      kern_cli();
      proc_table[i].cleanup_stack = proc_table[i].cleanup_stack->next;
    }

    /* call the thread-specific destructors */
    call_task_specific_data_destructors();

    /* Free the used context space */
    kern_context_delete(proc_table[i].context);

    /* tracer stuff */
    trc_logevent(TRC_DESTROY,&i);
    //kern_printf("[k%i]",i);

    /* Free the used stack area */
    /* Warning! we could be executing on the stack we are */
    /* going to destroy; this should cause no problem as  */
    /* the task_kill() cannot be interrupted & the memory */
    /* released can only be reassigned when we exit the   */
    /* primitive!                                         */
    if (!(proc_table[i].control & STACKADDR_SPECIFIED))
      kern_free(proc_table[i].stack,proc_table[i].stacksize);


    for (l=0; l<res_levels; l++)
      resource_table[l]->res_detach(l,i);

    lev = proc_table[i].task_level;
    level_table[lev]->task_end(lev,i);

    /* THIS ASSIGNMENT MUST STAY HERE!!!
       if we move it near the scheduler (after the counter checks)
       the kernel doesn't work, because:
       - if the task is the last one, a sys_end is called, but exec_shadow
         is != -1, so the sys_end calls the task_epilogue that reinserts
         the KILLED task into the ready queue!!!
    */
    exec = exec_shadow = -1;

    /* Decrement the Application task counter and end the system
       if necessary */
    if (!(proc_table[i].control & SYSTEM_TASK)) {
      //kern_printf("Ûtask%dÛ",task_counter);
      task_counter--;
      if (!task_counter)
        sys_end();
    }
    else if (!(proc_table[i].control & NO_KILL)) {
      //kern_printf("Ûsyst%dÛ",system_counter);
      system_counter--;
      if (!system_counter)
        sys_end();
    }

    /* SAME AS SCHEDULE, but not complete!!! */
    ll_gettime(TIME_EXACT, &schedule_time);
    /* we don't have to manage the capacity... because we are killing
       ourselves */
    if (cap_timer != NIL) {
      event_delete(cap_timer);
      cap_timer = NIL;
    }
    /* there is no epilogue... */

    scheduler();
}

/*
  This primitive autokills the executing task; it was used to avoid
  that returning from a task causes a jump to an unpredictable location.

  Now it is obsolete: the task_create_stub does all the work.

  It is still used by the POSIX layer to implement pthread_exit.
*/
void task_abort(void *returnvalue)
{
    kern_cli();

    task_makefree(returnvalue);

    ll_context_to(proc_table[exec_shadow].context);
}
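
/* Illustrative sketch (not part of the original file): the kind of mapping
   the POSIX layer can use to implement pthread_exit on top of task_abort,
   as mentioned in the comment above. This is a minimal sketch, not the
   actual S.Ha.R.K. POSIX-layer code; note that cleanup handlers and
   thread-specific data destructors are run by task_makefree() anyway. */
#if 0
void example_pthread_exit(void *value_ptr)
{
  /* never returns: the task descriptor is freed and another task is
     scheduled */
  task_abort(value_ptr);
}
#endif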

/*+
  This primitive kills the i-th task, frees the descriptor and the
  memory used for the stack.

  look also at kern_init in kern.c!!!
+*/
int task_kill(PID i)
{
    int j;        /* a counter */

    kern_cli();
    if (proc_table[i].control & NO_KILL ||
        proc_table[i].status == FREE) {
      errno = EUNVALID_KILL;
      kern_sti();
      return -1;
    }

    if (proc_table[i].control & KILL_REQUEST) {
      /* the task was already killed before... do nothing!!! */
      kern_sti();
      return 0;
    }

    /* if cancellation is enabled and asynchronous (not deferred!),
       and it is a suicide, kill now, otherwise set the cancellation bit */
    if (i == exec_shadow &&
        proc_table[i].control & KILL_ENABLED &&
        !(proc_table[i].control & KILL_DEFERRED)) {
      task_makefree(TASK_CANCELED);

      ll_context_to(proc_table[exec_shadow].context);
      // never returns!!!
    }

    /* check if the task is blocked on a cancellation point */
    for (j=0; j<cancellation_points; j++)
       if (cancellation_table[j].test(i,cancellation_table[j].arg))
         break;

//    kern_printf("Û%d", proc_table[i].control);

    proc_table[i].control |= KILL_REQUEST;

    kern_sti();
    return 0;
}
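
/* Illustrative sketch (not part of the original file): the calling
   convention of task_kill() as seen from application code. The PID
   `victim` and example_kill_usage() are hypothetical names. */
#if 0
void example_kill_usage(PID victim)
{
  if (task_kill(victim) == -1)
    kern_printf("task_kill failed (errno=%d)\n", errno);
  else
    /* unless the victim uses asynchronous cancellation, it will actually
       die at its next cancellation point */
    kern_printf("kill request posted to task %d\n", victim);
}
#endif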



/* similar to task_kill */
int group_kill(WORD g)
{
    BYTE autokill;
    register PID i;

    int j;        /* a counter */

    if (g == 0) {
      errno = EUNVALID_GROUP;
      return -1;
    }

    kern_cli();

    /* Detect suicide */
    autokill = (g == proc_table[exec].group);

    for (i=0 ; i <  MAX_PROC; i++) {
        if (proc_table[i].control & NO_KILL      ||
            proc_table[i].status == FREE         ||
            proc_table[i].control & KILL_REQUEST ||
            proc_table[i].group != g             )
          continue;

        /* if cancellation is enabled and asynchronous (not deferred!),
           and it is a suicide, kill now, otherwise set the cancellation bit */
        if (i == exec_shadow &&
            proc_table[i].control & KILL_ENABLED &&
            !(proc_table[i].control & KILL_DEFERRED)) {
          task_makefree(TASK_CANCELED);
          continue;
        }

        /* check if the task is blocked on a cancellation point */
        for (j=0; j<cancellation_points; j++)
           if (cancellation_table[j].test(i,cancellation_table[j].arg))
             break;

        proc_table[i].control |= KILL_REQUEST;

    }

    /* If we were killing ourselves we must switch context */
    /* without saving the current one                      */
    if (autokill) {
      ll_context_to(proc_table[exec].context);
    }
    else
      kern_sti();

    return 0;
}


/* this function is used in kernel_init to kill all the user tasks when
   the system goes to runlevel 2... */
void kill_user_tasks()
{
  PID k;
  int j;

  for (k=0; k<MAX_PROC; k++) {
    /* kill the task k!!! (see also task_kill in kill.c!!!) */
    if (proc_table[k].status == FREE ||
        proc_table[k].control & (KILL_REQUEST|NO_KILL|SYSTEM_TASK) )
      /* the task was already killed before... do nothing!!! */
      continue;

//    kern_printf("ÛKILLING %dÛ",k);

    /* check if the task is blocked on a cancellation point */
    for (j=0; j<cancellation_points; j++)
       if (cancellation_table[j].test(k,cancellation_table[j].arg))
         break;

    proc_table[k].control |= KILL_REQUEST;
  }
}