Subversion Repositories shark

Rev

Rev 829 | Rev 1011 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
793 trimarchi 1
/*
2
 * Project: S.Ha.R.K.
3
 *
4
 * Coordinators:
5
 *   Giorgio Buttazzo    <giorgio@sssup.it>
6
 *   Paolo Gai           <pj@gandalf.sssup.it>
7
 *
8
 * Authors     :
9
 *   Paolo Gai           <pj@gandalf.sssup.it>
10
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
11
 *   Luca Abeni          <luca@gandalf.sssup.it>
12
 *   (see the web pages for full authors list)
13
 *
14
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
15
 *
16
 * http://www.sssup.it
17
 * http://retis.sssup.it
18
 * http://shark.sssup.it
19
 */
20
 
21
/**
22
 ------------
867 trimarchi 23
 CVS :        $Id: pistar.c,v 1.9 2004-10-25 14:39:31 trimarchi Exp $
793 trimarchi 24
 
25
 File:        $File$
867 trimarchi 26
 Revision:    $Revision: 1.9 $
27
 Last update: $Date: 2004-10-25 14:39:31 $
793 trimarchi 28
 ------------
29
 
30
 Priority Inheritance protocol. See pi.h for more details...
31
 
32
**/
33
 
34
/*
35
 * Copyright (C) 2000 Paolo Gai
36
 *
37
 * This program is free software; you can redistribute it and/or modify
38
 * it under the terms of the GNU General Public License as published by
39
 * the Free Software Foundation; either version 2 of the License, or
40
 * (at your option) any later version.
41
 *
42
 * This program is distributed in the hope that it will be useful,
43
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
44
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
45
 * GNU General Public License for more details.
46
 *
47
 * You should have received a copy of the GNU General Public License
48
 * along with this program; if not, write to the Free Software
49
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
50
 *
51
 */
52
 
53
 
54
 
800 trimarchi 55
 
793 trimarchi 56
#include <ll/ll.h>
57
#include <ll/string.h>
58
#include <ll/stdio.h>
59
#include <kernel/const.h>
60
#include <sys/types.h>
61
#include <kernel/descr.h>
62
#include <kernel/var.h>
63
#include <kernel/func.h>
867 trimarchi 64
#include "fsf_configuration_parameters.h"
65
#include "fsf_core.h"
66
#include "fsf_server.h"
800 trimarchi 67
#include <pistar.h>
793 trimarchi 68
 
69
#include <tracer.h>
70
 
71
/* The PISTAR resource level descriptor: extends the generic mutex
   resource interface with the per-task bookkeeping needed by the
   priority-inheritance protocol. */
typedef struct {
  mutex_resource_des m;   /*+ the mutex interface +*/

  int nlocked[MAX_PROC];  /*+ how many mutexes task p currently locks;
                              used to detect a task dying while still
                              owning a mutex +*/

  PID blocked[MAX_PROC];  /*+ blocked queue: blocked[p] is the task
                              queued after p on the same mutex
                              (NIL terminates the chain) +*/
} PISTAR_mutex_resource_des;
79
 
80
 
81
/* this is the structure normally pointed by the opt field in the
   mutex_t structure */
typedef struct {
  PID owner;         /* task currently holding the mutex, NIL if free */
  int nblocked;      /* number of tasks currently blocked on this mutex */
  PID firstblocked;  /* head of the blocked chain (continued through
                        PISTAR_mutex_resource_des.blocked[]), NIL if none */
} PISTAR_mutex_t;
88
 
89
 
90
 
91
#if 0
92
/*+ print resource protocol statistics...+*/
93
static void PISTAR_resource_status(RLEVEL r)
94
{
95
  PISTAR_mutex_resource_des *m = (PISTAR_mutex_resource_des *)(resource_table[r]);
96
  PID i;
97
 
98
  kern_printf("Resources owned by the tasks:\n");
99
  for (i=0; i<MAX_PROC; i++) {
100
     kern_printf("%-4d", m->nlocked[i]);
101
  }
102
}
103
#endif
104
 
105
/* Resource-model registration hook.  PISTAR needs no per-task resource
   parameters, so every RES_MODEL is rejected: -1 tells the kernel this
   level does not handle the model. */
static int PISTAR_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  /* priority inheritance works with all tasks without Resource parameters */
  return -1;
}
110
 
111
/* Called when task p detaches from this resource level (e.g. at task
   death).  A task is not allowed to go away while it still owns a
   mutex: in that case the XMUTEX_OWNER_KILLED exception is raised. */
static void PISTAR_res_detach(RLEVEL l, PID p)
{
  PISTAR_mutex_resource_des *lev =
    (PISTAR_mutex_resource_des *)(resource_table[l]);

  if (lev->nlocked[p] == 0)
    return;                      /* nothing held, nothing to complain about */

  kern_raise(XMUTEX_OWNER_KILLED, p);
}
118
 
119
/* Initialize mutex m with attribute a on level l.
   Returns 0 on success, -1 if the attribute does not belong to this
   protocol class, ENOMEM if the protocol data cannot be allocated.
   Note: no check is made against re-initializing a non-destroyed mutex. */
static int PISTAR_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  PISTAR_mutex_t *mtx;

  /* this level only accepts attributes of its own class */
  if (a->mclass != PISTAR_MCLASS)
    return -1;

  mtx = (PISTAR_mutex_t *) kern_alloc(sizeof(PISTAR_mutex_t));
  if (!mtx)
    return (ENOMEM);

  /* the mutex starts out free, with nobody queued on it */
  mtx->owner        = NIL;
  mtx->nblocked     = 0;
  mtx->firstblocked = NIL;

  m->mutexlevel = l;
  m->opt        = (void *)mtx;

  return 0;
}
143
 
144
 
145
/* Destroy mutex m: free its protocol data and mark it uninitialized.
   Returns 0 on success, EINVAL if the mutex was never initialized
   (or already destroyed), EBUSY if tasks are still blocked on it.

   Fix: the original dereferenced m->opt (to read nblocked) BEFORE
   checking it for NULL, crashing on an uninitialized mutex; the NULL
   check now comes first, mirroring lock/trylock's EINVAL behavior. */
static int PISTAR_destroy(RLEVEL l, mutex_t *m)
{
  PISTAR_mutex_t *p = (PISTAR_mutex_t *)m->opt;
  SYS_FLAGS f;

  if (!p)
    return (EINVAL);

  if (p->nblocked)
    return (EBUSY);

  /* free under interrupt protection so nobody sees a half-dead mutex */
  f = kern_fsave();
  kern_free(m->opt, sizeof(PISTAR_mutex_t));
  m->opt = NULL;
  kern_frestore(f);

  return 0;
}
867 trimarchi 162
#if defined OLD_VERSION
793 trimarchi 163
/* Note that in this approach, when unlocking we can't wake up only
   one thread, but we have to wake up all the blocked threads, because
   there is no concept of priority between the tasks... Each woken
   thread has to retest the condition.
   Normally, they retest it only one time, because if many threads are
   unblocked, they are scheduled based on their priority (unknown in
   this module!)... and if the slice is greater than the critical
   sections, they never block!
   */
/*
 * OLD_VERSION lock primitive (compiled only when OLD_VERSION is
 * defined).  Besides the plain priority-inheritance blocking loop, it
 * takes the worst-case length of the critical section (wcet) and does
 * not enter it until the task's server has enough remaining budget;
 * otherwise the server is disabled and the task is rescheduled until
 * the budget test passes.
 * Returns 0 on success, EINVAL for an uninitialized mutex, EDEADLK
 * if the caller already owns it.
 */
int PISTAR_lock(RLEVEL l, mutex_t *m, TIME wcet)
{
  PISTAR_mutex_resource_des *lev = (PISTAR_mutex_resource_des *)(resource_table[l]);
  PISTAR_mutex_t *p;
  SYS_FLAGS f;
  int cond;                /* 1 while the budget test keeps failing */
  cond = 1;
  fsf_server_id_t server;

  f =  kern_fsave();

  TRACER_LOGEVENT(FTrace_EVT_set_mutex_lock,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)m);

  p = (PISTAR_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, return an error! */
    kern_frestore(f);
    return (EINVAL);
  }

  if (p->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_frestore(f);
    return (EDEADLK);
  }
  do {
  while (p->owner != NIL) {
    /* the mutex is locked by someone: "block" the task by pointing its
       shadow at the owner (this is what implements the priority
       inheritance) and push it on the mutex's blocked chain... */
    proc_table[exec_shadow].shadow = p->owner;
    lev->blocked[exec_shadow] = p->firstblocked;
    p->firstblocked = exec_shadow;
    p->nblocked++;
    /* ... call the scheduler... */
    scheduler();
    TRACER_LOGEVENT(FTrace_EVT_inheritance,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
    kern_context_load(proc_table[exec_shadow].context);

    /* ... and reacquire the cli() before the test... */
    kern_cli();
  }
  /* mutex is free: enter only if the server budget covers the whole
     critical section, else disable the server and try again later */
  fsf_get_server(exec_shadow, &server);
  if (fsf_get_remain_budget(server)>wcet) cond=0;
  else {
    SERVER_disable_server(fsf_get_server_level(),server);
    scheduler();

    kern_context_load(proc_table[exec_shadow].context);
    /* ... and reacquire the cli() before the test... */
    kern_cli();
  }

  } while(cond);

  /* if we are here, we have budget for the critical section */
  /* make the task non-preemptive for the local scheduler */
  fsf_settask_nopreemptive(&server, exec_shadow);

  /* the mutex is free, we can lock it! */
  lev->nlocked[exec_shadow]++;

  p->owner = exec_shadow;

  kern_frestore(f);

  return 0;
}
867 trimarchi 243
#else 
793 trimarchi 244
 
867 trimarchi 245
/*
 * Lock mutex m on level l with priority inheritance.
 * Blocks (via the shadow mechanism) while another task owns the mutex,
 * then takes it and makes the caller non-preemptive for its server's
 * local scheduler (undone in PISTAR_unlock).
 * Returns 0 on success, EINVAL for an uninitialized mutex, EDEADLK
 * if the caller already owns it.
 *
 * Cleanup vs. the previous revision: the local `cond` was assigned but
 * never read in this (non-OLD_VERSION) variant, and a commented-out
 * `return 0;` was left behind — both removed; declarations grouped
 * before the first statement.
 */
int PISTAR_lock(RLEVEL l, mutex_t *m)
{
  PISTAR_mutex_resource_des *lev = (PISTAR_mutex_resource_des *)(resource_table[l]);
  PISTAR_mutex_t *p;
  fsf_server_id_t server;
  SYS_FLAGS f;

  f = kern_fsave();

  TRACER_LOGEVENT(FTrace_EVT_set_mutex_lock,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)m);

  p = (PISTAR_mutex_t *)m->opt;
  if (!p) {
    /* the mutex is not initialized: return an error */
    kern_frestore(f);
    return (EINVAL);
  }

  if (p->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_frestore(f);
    return (EDEADLK);
  }

  while (p->owner != NIL) {
    /* the mutex is locked by someone: "block" the task by pointing
       its shadow at the owner (this implements the priority
       inheritance) and push it on the mutex's blocked chain... */
    proc_table[exec_shadow].shadow = p->owner;
    lev->blocked[exec_shadow] = p->firstblocked;
    p->firstblocked = exec_shadow;
    p->nblocked++;
    /* ... call the scheduler... */
    scheduler();
    TRACER_LOGEVENT(FTrace_EVT_inheritance,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
    kern_context_load(proc_table[exec_shadow].context);

    /* ... and reacquire the cli() before retesting the owner... */
    kern_cli();
  }

  /* make the task non-preemptive for the local scheduler while it is
     inside the critical section */
  fsf_get_server(exec_shadow, &server);
  fsf_settask_nopreemptive(&server, exec_shadow);

  /* the mutex is free, we can lock it! */
  lev->nlocked[exec_shadow]++;

  p->owner = exec_shadow;

  kern_frestore(f);

  return 0;
}
306
#endif
307
 
308
 
793 trimarchi 309
/* Try to lock mutex m without ever blocking.
   Returns 0 if the mutex was free and is now owned by the caller,
   EINVAL if it is not initialized, EBUSY if another task holds it. */
static int PISTAR_trylock(RLEVEL l, mutex_t *m)
{
  PISTAR_mutex_resource_des *lev;
  PISTAR_mutex_t *mtx;
  SYS_FLAGS flags;

  flags = kern_fsave();

  mtx = (PISTAR_mutex_t *)m->opt;
  if (!mtx) {
    /* uninitialized mutex */
    kern_frestore(flags);
    return (EINVAL);
  }

  if (mtx->owner != NIL) {
    /* somebody already owns it: report busy instead of blocking */
    kern_frestore(flags);
    return (EBUSY);
  }

  /* the mutex is free: take it */
  lev = (PISTAR_mutex_resource_des *)(resource_table[l]);
  lev->nlocked[exec_shadow]++;
  mtx->owner = exec_shadow;

  kern_frestore(flags);
  return 0;
}
339
 
340
/* Unlock mutex m.
   All blocked tasks are woken (their shadows reset to themselves) —
   there is no notion of priority inside this module, so each woken
   task retests ownership in PISTAR_lock.  The caller is made
   preemptive again for its server and the scheduler is invoked.
   Returns 0 on success, EINVAL for an uninitialized mutex, EPERM if
   the caller is not the owner.
   NOTE(review): the EPERM path uses kern_sti() while lock/trylock use
   kern_fsave()/kern_frestore() pairs — presumably unlock is entered
   with interrupts already disabled by the syscall path; confirm
   against the kernel entry code. */
static int PISTAR_unlock(RLEVEL l, mutex_t *m)
{
  PISTAR_mutex_resource_des *lev;
  PISTAR_mutex_t *p;
  int i, j;
  fsf_server_id_t server;

  p = (PISTAR_mutex_t *)m->opt;
  if (!p)
    return (EINVAL);

  if (p->owner != exec_shadow) {
    /* the mutex is owned by another task!!! */
    kern_sti();
    return (EPERM);
  }

  proc_table[exec_shadow].context = kern_context_save();

  /* the mutex is mine */
  lev = (PISTAR_mutex_resource_des *)(resource_table[l]);
  lev->nlocked[exec_shadow]--;

  p->owner = NIL;

  /* we unblock all the waiting tasks: walk the blocked chain, reset
     each task's shadow to itself and clear its chain link */
  i = p->firstblocked;
  p->firstblocked = NIL;

  while (i != NIL) {
    proc_table[i].shadow = j = i;
    i = lev->blocked[i];
    lev->blocked[j] = NIL;
  }
  p->nblocked = 0;

  /* make the task preemptive again for the local scheduler */
  fsf_get_server(exec_shadow, &server);
  fsf_settask_preemptive(&server, exec_shadow);

  scheduler();
  TRACER_LOGEVENT(FTrace_EVT_inheritance,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}
395
 
396
RLEVEL PISTAR_register_module(void)
397
{
398
  RLEVEL l;                  /* the level that we register */
399
  PISTAR_mutex_resource_des *m;  /* for readableness only */
400
  PID i;                     /* a counter */
401
 
402
  printk("PISTAR_register_module\n");
403
 
404
  /* request an entry in the level_table */
405
  l = resource_alloc_descriptor();
406
 
407
  /* alloc the space needed for the EDF_level_des */
408
  m = (PISTAR_mutex_resource_des *)kern_alloc(sizeof(PISTAR_mutex_resource_des));
409
 
410
  /* update the level_table with the new entry */
411
  resource_table[l] = (resource_des *)m;
412
 
413
  /* fill the resource_des descriptor */
414
  m->m.r.rtype                       = MUTEX_RTYPE;
415
  m->m.r.res_register                = PISTAR_res_register;
416
  m->m.r.res_detach                  = PISTAR_res_detach;
417
 
418
  /* fill the mutex_resource_des descriptor */
419
  m->m.init                          = PISTAR_init;
420
  m->m.destroy                       = PISTAR_destroy;
867 trimarchi 421
  m->m.lock                          = PISTAR_lock;
793 trimarchi 422
  m->m.trylock                       = PISTAR_trylock;
423
  m->m.unlock                        = PISTAR_unlock;
424
 
425
  /* fille the PISTAR_mutex_resource_des descriptor */
426
  for (i=0; i<MAX_PROC; i++) {
427
    m->nlocked[i] = 0;
428
    m->blocked[i] = NIL;
429
  }
430
 
431
  return l;
432
}
433