Subversion Repositories shark

Rev

Rev 1011 | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
793 trimarchi 1
/*
2
 * Project: S.Ha.R.K.
3
 *
4
 * Coordinators:
5
 *   Giorgio Buttazzo    <giorgio@sssup.it>
6
 *   Paolo Gai           <pj@gandalf.sssup.it>
7
 *
8
 * Authors     :
9
 *   Paolo Gai           <pj@gandalf.sssup.it>
10
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
11
 *   Luca Abeni          <luca@gandalf.sssup.it>
12
 *   (see the web pages for full authors list)
13
 *
14
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
15
 *
16
 * http://www.sssup.it
17
 * http://retis.sssup.it
18
 * http://shark.sssup.it
19
 */
20
 
21
/**
22
 ------------
1011 trimarchi 23
 CVS :        $Id: pistar.c,v 1.10 2005-05-12 07:50:10 trimarchi Exp $
793 trimarchi 24
 
25
 File:        $File$
1011 trimarchi 26
 Revision:    $Revision: 1.10 $
27
 Last update: $Date: 2005-05-12 07:50:10 $
793 trimarchi 28
 ------------
29
 
30
 Priority Inhertitance protocol. see pi.h for more details...
31
 
32
**/
33
 
34
/*
35
 * Copyright (C) 2000 Paolo Gai
36
 *
37
 * This program is free software; you can redistribute it and/or modify
38
 * it under the terms of the GNU General Public License as published by
39
 * the Free Software Foundation; either version 2 of the License, or
40
 * (at your option) any later version.
41
 *
42
 * This program is distributed in the hope that it will be useful,
43
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
44
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
45
 * GNU General Public License for more details.
46
 *
47
 * You should have received a copy of the GNU General Public License
48
 * along with this program; if not, write to the Free Software
49
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
50
 *
51
 */
52
 
53
 
54
 
800 trimarchi 55
 
793 trimarchi 56
#include <ll/ll.h>
1689 fabio 57
#include <arch/string.h>
58
#include <arch/stdio.h>
793 trimarchi 59
#include <kernel/const.h>
60
#include <sys/types.h>
61
#include <kernel/descr.h>
62
#include <kernel/var.h>
63
#include <kernel/func.h>
867 trimarchi 64
#include "fsf_configuration_parameters.h"
65
#include "fsf_core.h"
66
#include "fsf_server.h"
800 trimarchi 67
#include <pistar.h>
793 trimarchi 68
 
69
#include <tracer.h>
70
 
71
/* The PISTAR resource level descriptor */
72
typedef struct {
73
  mutex_resource_des m;   /*+ the mutex interface +*/
74
 
75
  int nlocked[MAX_PROC];  /*+ how many mutex a task currently locks +*/
76
 
77
  PID blocked[MAX_PROC];  /*+ blocked queue ... +*/
1011 trimarchi 78
 
793 trimarchi 79
} PISTAR_mutex_resource_des;
80
 
81
 
82
/* this is the structure normally pointed by the opt field in the
83
   mutex_t structure */
84
typedef struct {
85
  PID owner;
86
  int nblocked;
87
  PID firstblocked;
1011 trimarchi 88
  //TIME wcet;
793 trimarchi 89
} PISTAR_mutex_t;
90
 
91
 
92
 
93
#if 0
/*+ debug helper: dump how many mutexes each task holds +*/
static void PISTAR_resource_status(RLEVEL r)
{
  PISTAR_mutex_resource_des *m = (PISTAR_mutex_resource_des *)(resource_table[r]);
  PID i;

  kern_printf("Resources owned by the tasks:\n");
  for (i = 0; i < MAX_PROC; i++)
    kern_printf("%-4d", m->nlocked[i]);
}
#endif
106
 
107
static int PISTAR_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  /* Priority inheritance needs no per-task resource model, so every
     registration request is refused. */
  return -1;
}
112
 
113
static void PISTAR_res_detach(RLEVEL l, PID p)
{
  PISTAR_mutex_resource_des *lev = (PISTAR_mutex_resource_des *)(resource_table[l]);

  /* A task must not die while still holding a PISTAR mutex. */
  if (lev->nlocked[p])
    kern_raise(XMUTEX_OWNER_KILLED, p);
}
120
 
121
/* Initialize a mutex for the PISTAR protocol.
   Returns 0 on success, -1 on a wrong attribute class, ENOMEM when the
   descriptor cannot be allocated. */
static int PISTAR_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  PISTAR_mutex_t *mut;

  /* Only attributes of our own class are accepted. */
  if (a->mclass != PISTAR_MCLASS)
    return -1;

  mut = (PISTAR_mutex_t *) kern_alloc(sizeof(PISTAR_mutex_t));

  /* Only an out-of-memory check here; re-initializing a live
     (non-destroyed) mutex is not detected. */
  if (!mut)
    return (ENOMEM);

  mut->owner        = NIL;
  mut->nblocked     = 0;
  mut->firstblocked = NIL;

  m->mutexlevel = l;
  m->opt        = (void *)mut;

  return 0;
}
145
 
146
 
147
/* Destroy a PISTAR mutex.
   Returns 0 on success, EINVAL if the mutex was never initialized (or
   was already destroyed), EBUSY if tasks are still blocked on it. */
static int PISTAR_destroy(RLEVEL l, mutex_t *m)
{
  SYS_FLAGS f;

  /* BUG FIX: the original dereferenced m->opt
     (((PISTAR_mutex_t *)m->opt)->nblocked) BEFORE checking it for NULL,
     so destroying an uninitialized or already-destroyed mutex crashed.
     Check first and report EINVAL, consistent with lock/trylock/unlock. */
  if (!m->opt)
    return (EINVAL);

  if (((PISTAR_mutex_t *)m->opt)->nblocked)
    return (EBUSY);

  f = kern_fsave();
  kern_free(m->opt, sizeof(PISTAR_mutex_t));
  m->opt = NULL;   /* defends against double destroy */
  kern_frestore(f);

  return 0;
}
867 trimarchi 164
#if defined OLD_VERSION
/* Note: on unlock this module cannot pick a single thread to wake,
   because it knows nothing about task priorities; ALL blocked threads
   are released and each one re-tests the lock condition.  In practice
   they re-test only once: the woken threads are scheduled by their own
   priority, and when the slice exceeds the critical section they never
   block again. */
int PISTAR_lock(RLEVEL l, mutex_t *m, TIME wcet)
{
  PISTAR_mutex_resource_des *lev = (PISTAR_mutex_resource_des *)(resource_table[l]);
  PISTAR_mutex_t *mut;
  SYS_FLAGS f;
  int retry = 1;
  fsf_server_id_t server;

  f = kern_fsave();

  TRACER_LOGEVENT(FTrace_EVT_set_mutex_lock,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)m);

  mut = (PISTAR_mutex_t *)m->opt;
  if (!mut) {
    /* locking an uninitialized mutex is an error */
    kern_frestore(f);
    return (EINVAL);
  }

  if (mut->owner == exec_shadow) {
    /* recursive locking is not supported */
    kern_frestore(f);
    return (EDEADLK);
  }

  do {
    while (mut->owner != NIL) {
      /* The mutex is held: "block" by shadowing the owner, so the owner
         inherits our scheduling position ... */
      proc_table[exec_shadow].shadow = mut->owner;
      lev->blocked[exec_shadow] = mut->firstblocked;
      mut->firstblocked = exec_shadow;
      mut->nblocked++;

      /* ... reschedule ... */
      scheduler();
      TRACER_LOGEVENT(FTrace_EVT_inheritance,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
      kern_context_load(proc_table[exec_shadow].context);

      /* ... and take the cli() again before re-testing the owner. */
      kern_cli();
    }

    /* Is there enough server budget left for the critical section? */
    fsf_get_server(exec_shadow, &server);
    if (fsf_get_remain_budget(server) > wcet)
      retry = 0;
    else {
      /* Not enough budget: park the server and try again later. */
      SERVER_disable_server(fsf_get_server_level(), server);
      scheduler();
      kern_context_load(proc_table[exec_shadow].context);
      /* reacquire the cli() before the next test */
      kern_cli();
    }
  } while (retry);

  /* Budget is sufficient: make the task non-preemptable for the local
     scheduler while it is inside the critical section. */
  fsf_settask_nopreemptive(&server, exec_shadow);

  /* the mutex is free — take it */
  lev->nlocked[exec_shadow]++;
  mut->owner = exec_shadow;

  kern_frestore(f);

  return 0;
}
867 trimarchi 246
#else 
793 trimarchi 247
 
867 trimarchi 248
int PISTAR_lock(RLEVEL l, mutex_t *m)
249
{
250
  PISTAR_mutex_resource_des *lev = (PISTAR_mutex_resource_des *)(resource_table[l]);
251
  PISTAR_mutex_t *p;
252
  SYS_FLAGS f;
253
//  return 0;
1011 trimarchi 254
  //int cond;
255
  //cond = 1;
256
  //fsf_server_id_t server;
867 trimarchi 257
 
258
  f =  kern_fsave();
259
 
260
  TRACER_LOGEVENT(FTrace_EVT_set_mutex_lock,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)m);
261
 
262
  //kern_printf("(PISTAR lock)");
263
  p = (PISTAR_mutex_t *)m->opt;
264
  if (!p) {
265
    /* if the mutex is not initialized, return an error! */
266
    kern_frestore(f);
267
    return (EINVAL);
268
  }
269
 
270
 
271
  if (p->owner == exec_shadow) {
272
    /* the task already owns the mutex */
273
    kern_frestore(f);
274
    return (EDEADLK);
275
  }
276
 
277
  while (p->owner != NIL) {
278
    /* the mutex is locked by someone, "block" the task ...*/
279
    proc_table[exec_shadow].shadow = p->owner;
280
    lev->blocked[exec_shadow] = p->firstblocked;
281
    p->firstblocked = exec_shadow;
282
    p->nblocked++;
283
//    kern_printf("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<");
284
    /* ... call the scheduler... */
285
    scheduler();
286
    TRACER_LOGEVENT(FTrace_EVT_inheritance,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
287
    kern_context_load(proc_table[exec_shadow].context);
288
 
289
    /* ... and reaquire the cli() before the test... */
290
    kern_cli();
291
  }
292
 
293
 
294
  /* if we are here, we have budget for critical section */
295
  /* Set the task no preemptive for the localscheduler */
296
  //kern_printf("(PISTAR NP %d", exec_shadow);
1011 trimarchi 297
  //fsf_get_server(exec_shadow, &server);
298
  //fsf_settask_nopreemptive(&server, exec_shadow);
867 trimarchi 299
 
300
  /* the mutex is free, We can lock it! */
301
  lev->nlocked[exec_shadow]++;
302
 
303
  p->owner = exec_shadow;
304
 
305
  kern_frestore(f);
306
 
307
  return 0;
308
}
309
#endif
310
 
311
 
793 trimarchi 312
/* Try to lock a PISTAR mutex without blocking.
   Returns 0 on success, EINVAL for an uninitialized mutex, EBUSY when
   another task already owns it. */
static int PISTAR_trylock(RLEVEL l, mutex_t *m)
{
  PISTAR_mutex_t *mut;
  SYS_FLAGS f;

  f = kern_fsave();

  mut = (PISTAR_mutex_t *)m->opt;
  if (!mut) {
    /* trying to lock an uninitialized mutex is an error */
    kern_frestore(f);
    return (EINVAL);
  }

  if (mut->owner != NIL) {
    /* already held by some task: give up immediately */
    kern_frestore(f);
    return (EBUSY);
  }

  /* free — take it */
  {
    PISTAR_mutex_resource_des *lev = (PISTAR_mutex_resource_des *)(resource_table[l]);
    lev->nlocked[exec_shadow]++;
  }
  mut->owner = exec_shadow;

  kern_frestore(f);
  return 0;
}
342
 
343
/* Unlock a PISTAR mutex and wake ALL tasks blocked on it (this module
   knows no task priorities, so it cannot pick a single waiter; each
   woken task re-tests the lock in PISTAR_lock).
   Returns 0 on success, EINVAL for an uninitialized mutex, EPERM when
   the caller is not the owner.

   FIX: removed the local `fsf_server_id_t server;` — it has been unused
   since the fsf_get_server()/fsf_settask_preemptive() calls were
   commented out (rev 1011) and only produced a compiler warning; also
   dropped the long-dead commented-out debug code. */
static int PISTAR_unlock(RLEVEL l, mutex_t *m)
{
  PISTAR_mutex_resource_des *lev;
  PISTAR_mutex_t *mut;
  int i, j;

  mut = (PISTAR_mutex_t *)m->opt;
  if (!mut)
    return (EINVAL);

  if (mut->owner != exec_shadow) {
    /* the mutex is owned by another task */
    kern_sti();
    return (EPERM);
  }

  proc_table[exec_shadow].context = kern_context_save();

  /* the mutex is ours: release it */
  lev = (PISTAR_mutex_resource_des *)(resource_table[l]);
  lev->nlocked[exec_shadow]--;

  mut->owner = NIL;

  /* Wake every waiter: point each blocked task's shadow back at itself
     and unlink it from the blocked list. */
  i = mut->firstblocked;
  mut->firstblocked = NIL;

  while (i != NIL) {
    proc_table[i].shadow = j = i;
    i = lev->blocked[i];
    lev->blocked[j] = NIL;
  }
  mut->nblocked = 0;

  scheduler();
  TRACER_LOGEVENT(FTrace_EVT_inheritance,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}
398
 
399
RLEVEL PISTAR_register_module(void)
400
{
401
  RLEVEL l;                  /* the level that we register */
402
  PISTAR_mutex_resource_des *m;  /* for readableness only */
403
  PID i;                     /* a counter */
404
 
405
  printk("PISTAR_register_module\n");
406
 
407
  /* request an entry in the level_table */
408
  l = resource_alloc_descriptor();
409
 
410
  /* alloc the space needed for the EDF_level_des */
411
  m = (PISTAR_mutex_resource_des *)kern_alloc(sizeof(PISTAR_mutex_resource_des));
412
 
413
  /* update the level_table with the new entry */
414
  resource_table[l] = (resource_des *)m;
415
 
416
  /* fill the resource_des descriptor */
417
  m->m.r.rtype                       = MUTEX_RTYPE;
418
  m->m.r.res_register                = PISTAR_res_register;
419
  m->m.r.res_detach                  = PISTAR_res_detach;
420
 
421
  /* fill the mutex_resource_des descriptor */
422
  m->m.init                          = PISTAR_init;
423
  m->m.destroy                       = PISTAR_destroy;
867 trimarchi 424
  m->m.lock                          = PISTAR_lock;
793 trimarchi 425
  m->m.trylock                       = PISTAR_trylock;
426
  m->m.unlock                        = PISTAR_unlock;
427
 
428
  /* fille the PISTAR_mutex_resource_des descriptor */
429
  for (i=0; i<MAX_PROC; i++) {
430
    m->nlocked[i] = 0;
431
    m->blocked[i] = NIL;
432
  }
433
 
434
  return l;
435
}
436