/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors     :
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
 *   Luca Abeni          <luca@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: pistar.c,v 1.5 2004-09-06 08:58:05 trimarchi Exp $

 File:        $File$
 Revision:    $Revision: 1.5 $
 Last update: $Date: 2004-09-06 08:58:05 $
 ------------

 Priority Inheritance protocol. See pi.h for more details...

**/

/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */


#include <ll/ll.h>
#include <ll/string.h>
#include <ll/stdio.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <fsf_contract.h>
#include <fsf_server.h>
#include <pistar.h>

#include <tracer.h>

/* The PISTAR resource level descriptor */
typedef struct {
  mutex_resource_des m;   /*+ the mutex interface +*/

  int nlocked[MAX_PROC];  /*+ how many mutexes a task currently locks +*/

  PID blocked[MAX_PROC];  /*+ blocked queue ... +*/
} PISTAR_mutex_resource_des;


/* this is the structure normally pointed to by the opt field in the
   mutex_t structure */
typedef struct {
  PID owner;
  int nblocked;
  PID firstblocked;
} PISTAR_mutex_t;
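
/* Usage sketch (an assumption, not shown in this file): the module is
   registered once at system initialization, and mutexes are then created
   through the generic S.Ha.R.K. mutex interface with an attribute whose
   mclass field is PISTAR_MCLASS, so that initialization reaches
   PISTAR_init() below. Roughly:

     PISTAR_register_module();
     ...
     mutexattr_t a;              // the real attribute type/setup is in pistar.h
     a.mclass = PISTAR_MCLASS;
     mutex_init(&mx, &a);        // dispatches to PISTAR_init()
*/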
#if 0
/*+ print resource protocol statistics...+*/
static void PISTAR_resource_status(RLEVEL r)
{
  PISTAR_mutex_resource_des *m = (PISTAR_mutex_resource_des *)(resource_table[r]);
  PID i;

  kern_printf("Resources owned by the tasks:\n");
  for (i=0; i<MAX_PROC; i++) {
    kern_printf("%-4d", m->nlocked[i]);
  }
}
#endif

static int PISTAR_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  /* priority inheritance works with all tasks without Resource parameters */
  return -1;
}

static void PISTAR_res_detach(RLEVEL l, PID p)
{
  PISTAR_mutex_resource_des *m = (PISTAR_mutex_resource_des *)(resource_table[l]);
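
  /* the task is dying: if it still owns a mutex handled by this level,
     raise an exception instead of letting the lock leak */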
  if (m->nlocked[p])
    kern_raise(XMUTEX_OWNER_KILLED, p);
}

static int PISTAR_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  PISTAR_mutex_t *p;

  if (a->mclass != PISTAR_MCLASS)
    return -1;

  p = (PISTAR_mutex_t *) kern_alloc(sizeof(PISTAR_mutex_t));

  /* check that there is enough memory; no check for init on a
     non-destroyed mutex */

  if (!p)
    return (ENOMEM);

  p->owner        = NIL;
  p->nblocked     = 0;
  p->firstblocked = NIL;

  m->mutexlevel   = l;
  m->opt          = (void *)p;

  return 0;
}


static int PISTAR_destroy(RLEVEL l, mutex_t *m)
{
//  PISTAR_mutex_resource_des *lev = (PISTAR_mutex_resource_des *)(resource_table[l]);
  SYS_FLAGS f;

  if ( ((PISTAR_mutex_t *)m->opt)->nblocked)
    return (EBUSY);

  f = kern_fsave();
  if (m->opt) {
    kern_free(m->opt,sizeof(PISTAR_mutex_t));
    m->opt = NULL;
  }
  kern_frestore(f);

  return 0;
}

/* Note that in this approach, when unlocking, we can't wake up only one
   thread: we have to wake up all the blocked threads, because there is no
   concept of priority among the tasks at this level... Each woken thread
   has to retest the condition.
   Normally they retest it only once, because if many threads are unblocked
   they are scheduled according to their priority (unknown in this module!)...
   and if the time slice is greater than the critical sections, they never
   block again!
   */
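
/* Lock the mutex. The wcet parameter is the declared worst-case length of
   the critical section about to be entered: it is compared against the
   remaining budget of the calling task's server, so the critical section
   is entered only when it can be completed within the current budget. */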
int PISTAR_lock(RLEVEL l, mutex_t *m, TIME wcet)
{
  PISTAR_mutex_resource_des *lev = (PISTAR_mutex_resource_des *)(resource_table[l]);
  PISTAR_mutex_t *p;
  SYS_FLAGS f;
  fsf_server_id_t server;
  int cond = 1;

  f = kern_fsave();
  //kern_printf("(PISTAR lock)");
  p = (PISTAR_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, return an error! */
    kern_frestore(f);
    return (EINVAL);
  }

  if (p->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_frestore(f);
    return (EDEADLK);
  }

  do {
    while (p->owner != NIL) {
      /* the mutex is locked by someone, "block" the task ... */
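      /* blocking is done through the shadow mechanism: the blocked task
         points its shadow to the owner, so whenever the blocked task is
         selected the owner runs in its place (priority inheritance), and
         the task is chained into the mutex blocked list */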
      proc_table[exec_shadow].shadow = p->owner;
      lev->blocked[exec_shadow] = p->firstblocked;
      p->firstblocked = exec_shadow;
      p->nblocked++;
//    kern_printf("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<");
      /* ... call the scheduler... */
      scheduler();
      TRACER_LOGEVENT(FTrace_EVT_inheritance,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
      kern_context_load(proc_table[exec_shadow].context);

      /* ... and reacquire the cli() before the test... */
      kern_cli();
    }
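
    /* the mutex appears free: before actually taking it, check that the
       server serving this task has more than wcet budget left. If not,
       disable the server, yield the CPU, and retest both the budget and
       the owner when we run again. */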
    fsf_get_server(&server, exec_shadow);
    if (fsf_get_remain_budget(server) > wcet) cond = 0;
    else {
      SERVER_disable_server(fsf_get_server_level(), server);
      scheduler();
      kern_context_load(proc_table[exec_shadow].context);
      /* ... and reacquire the cli() before the test... */
      kern_cli();
    }

  } while (cond);

  /* if we are here, there is enough budget for the critical section */
  /* set the task non-preemptive for the local scheduler */
  //kern_printf("(PISTAR NP %d", exec_shadow);
  fsf_settask_nopreemptive(&server, exec_shadow);

  /* the mutex is free, we can lock it! */
  lev->nlocked[exec_shadow]++;

  p->owner = exec_shadow;

  kern_frestore(f);

  return 0;
}

static int PISTAR_trylock(RLEVEL l, mutex_t *m)
{
  PISTAR_mutex_t *p;
  SYS_FLAGS f;

  f = kern_fsave();

  p = (PISTAR_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, return an error! */
    kern_frestore(f);
    return (EINVAL);
  }

  if (p->owner != NIL) {
    /* a task already owns the mutex */
    kern_frestore(f);
    return (EBUSY);
  }
  else {
    /* the mutex is free */
    PISTAR_mutex_resource_des *lev = (PISTAR_mutex_resource_des *)(resource_table[l]);
    lev->nlocked[exec_shadow]++;

    p->owner = exec_shadow;

    kern_frestore(f);
    return 0;
  }
}

static int PISTAR_unlock(RLEVEL l, mutex_t *m)
{
  PISTAR_mutex_resource_des *lev;
  PISTAR_mutex_t *p;
  int i, j;
  fsf_server_id_t server;

  p = (PISTAR_mutex_t *)m->opt;
  if (!p)
    return (EINVAL);

  if (p->owner != exec_shadow) {
    /* the mutex is owned by another task!!! */
    kern_sti();
    return (EPERM);
  }

  proc_table[exec_shadow].context = kern_context_save();

  /* the mutex is mine */
  lev = (PISTAR_mutex_resource_des *)(resource_table[l]);
  lev->nlocked[exec_shadow]--;

  p->owner = NIL;

  /* we unblock all the waiting tasks... */
  i = p->firstblocked;
  p->firstblocked = NIL;
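
  /* walk the blocked list: resetting each task's shadow to itself removes
     the inheritance link created in PISTAR_lock() and makes the task run
     on its own again; the list links are cleared as we go */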
  while (i != NIL) {
//    kern_printf("<<%d>>", i);
    proc_table[i].shadow = j = i;
    i = lev->blocked[i];
    lev->blocked[j] = NIL;
  }
  p->nblocked = 0;

/*  {
   int xxx;
   //kern_printf("(PISTAR_unlock owner=%d ",p->owner);
   for (xxx = 0; xxx<5; xxx++) kern_printf("p%d s%d|",xxx, proc_table[xxx].shadow);
   kern_printf(")\n");
  }*/

  /* set the task preemptive again for the local scheduler */
  fsf_get_server(&server, exec_shadow);
  fsf_settask_preemptive(&server, exec_shadow);

  scheduler();
  TRACER_LOGEVENT(FTrace_EVT_inheritance,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}

RLEVEL PISTAR_register_module(void)
{
  RLEVEL l;                      /* the level that we register */
  PISTAR_mutex_resource_des *m;  /* for readability only */
  PID i;                         /* a counter */

  printk("PISTAR_register_module\n");

  /* request an entry in the level_table */
  l = resource_alloc_descriptor();

  /* alloc the space needed for the PISTAR_mutex_resource_des */
  m = (PISTAR_mutex_resource_des *)kern_alloc(sizeof(PISTAR_mutex_resource_des));

  /* update the level_table with the new entry */
  resource_table[l] = (resource_des *)m;

  /* fill the resource_des descriptor */
  m->m.r.rtype                       = MUTEX_RTYPE;
  m->m.r.res_register                = PISTAR_res_register;
  m->m.r.res_detach                  = PISTAR_res_detach;

  /* fill the mutex_resource_des descriptor */
  m->m.init                          = PISTAR_init;
  m->m.destroy                       = PISTAR_destroy;
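  /* note: the generic lock hook is left NULL below; PISTAR_lock() takes an
     extra wcet argument and is exported non-static, so it is presumably
     called directly rather than through the generic mutex_lock() path
     (an assumption, not stated in this file) */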
  m->m.lock                          = NULL;
  m->m.trylock                       = PISTAR_trylock;
  m->m.unlock                        = PISTAR_unlock;

  /* fill the PISTAR_mutex_resource_des descriptor */
  for (i=0; i<MAX_PROC; i++) {
    m->nlocked[i] = 0;
    m->blocked[i] = NIL;
  }

  return l;
}