/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors     :
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
 *   Luca Abeni          <luca@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: nopm.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $

 File:        $File$
 Revision:    $Revision: 1.1.1.1 $
 Last update: $Date: 2002-03-29 14:12:52 $
 ------------

 See modules/nopm.h.
 This code is a copy of nop.c with minor modifications: in particular,
 each mutex keeps a nesting counter so the owning task can lock it
 recursively, and every mutex is registered in a table that
 dump_nopm_table() can print for debugging.
**/

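/*
 * Usage sketch (illustrative only, not compiled here): how a task could
 * use this module through the generic mutex calls, assuming that
 * mutex_init()/mutex_lock()/mutex_unlock() dispatch to the registered
 * NOPM level as for the other S.Ha.R.K. mutex modules, and that
 * NOPM_mutexattr_t / NOPM_mutexattr_default() come from modules/nopm.h:
 *
 *   NOPM_mutexattr_t a;
 *   mutex_t mu;
 *
 *   NOPM_mutexattr_default(a);   // select the NOPM mutex class
 *   mutex_init(&mu, &a);         // dispatched to NOPM_init()
 *   mutex_lock(&mu);             // owner set, counter = 1
 *   mutex_lock(&mu);             // nested lock by the owner, counter = 2
 *   mutex_unlock(&mu);           // counter back to 1, mutex still owned
 *   mutex_unlock(&mu);           // counter 0, mutex released
 *   mutex_destroy(&mu);
 */
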
/*
 * Copyright (C) 2000 Massimiliano Giorgi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */


#include <modules/nopm.h>

#include <ll/ll.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <modules/codes.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>

/* The NOPM resource level descriptor */
typedef struct {
  mutex_resource_des m;   /*+ the mutex interface +*/
} NOPM_mutex_resource_des;


/* this is the structure normally pointed by the opt field in the
   mutex_t structure */
typedef struct {
  PID owner;       /* PID of the task holding the mutex (NIL if free) */
  QQUEUE blocked;  /* queue of the tasks blocked on the mutex */
  int counter;     /* nesting level: lock() calls done by the owner and
                      not yet matched by an unlock() */
} NOPM_mutex_t;


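/* Debug support: every mutex initialized by NOPM_init() is recorded in
   the table below, so that dump_nopm_table() can print at any time which
   tasks are blocked on which NOPM mutexes. */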
#define MAXTABLE 4096
static mutex_t *table[MAXTABLE];
static int index=0;

static int register_nopm(mutex_t *p)
{
  if (index>=MAXTABLE) return -1;
  table[index++]=p;
  return 0;
}

void dump_nopm_table(void)
{
  NOPM_mutex_t *ptr;
  SYS_FLAGS f;
  PID j;
  int i;

  f=kern_fsave();
  kern_printf("nopm_mutex module TABLE\n");
  kern_printf("----------------------\n");
  for(i=0;i<index;i++) {
    ptr=table[i]->opt;
    if (ptr->blocked.first!=NIL) {
      kern_printf("%i blocks on 0x%p: ",ptr->owner,table[i]);
      j=ptr->blocked.first;
      while (j!=NIL) {
        kern_printf("%i ",(int)j);
        j=proc_table[j].next;
      }
      kern_printf("\n");
    } else {
      //kern_printf("0x%p no block\n",table[i]);
    }
  }
  kern_frestore(f);

}


/* Wait status for this library */
#define NOPM_WAIT LIB_STATUS_BASE


/*+ print resource protocol statistics...+*/
static void NOPM_resource_status(RLEVEL r)
{
  kern_printf("No status for NOPM module\n");
}


static int NOPM_level_accept_resource_model(RLEVEL l, RES_MODEL *r)
{
  /* the NOPM module does not accept any resource model */
  return -1;
}

static void NOPM_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  /* never called!!! */
}

static void NOPM_res_detach(RLEVEL l, PID p)
{
}

static int NOPM_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a)
{
  if (a->mclass == NOPM_MCLASS || a->mclass == (NOPM_MCLASS | l) )
    return 0;
  else
    return -1;
}

static int NOPM_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  NOPM_mutex_t *p;

  p = (NOPM_mutex_t *) kern_alloc(sizeof(NOPM_mutex_t));

  /* check that there is enough memory; no check for an init on a
     non-destroyed mutex */

  if (!p)
    return (ENOMEM);

  p->owner = NIL;
  qq_init(&p->blocked);
  p->counter=0;

  m->mutexlevel = l;
  m->opt = (void *)p;

  /* MG: record the mutex so that dump_nopm_table() can find it */
  register_nopm(m);

  return 0;
}

static int NOPM_destroy(RLEVEL l, mutex_t *m)
{
//  NOPM_mutex_resource_des *lev = (NOPM_mutex_resource_des *)(resource_table[l]);

  if ( ((NOPM_mutex_t *)m->opt)->owner != NIL)
    return (EBUSY);

  kern_cli();
  if (m->opt) {
    kern_free(m->opt,sizeof(NOPM_mutex_t));
    m->opt = NULL;
  }
  kern_sti();

  return 0;
}

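/* lock: if the mutex is free it is taken immediately; if the caller
   already owns it only the nesting counter is increased; otherwise the
   calling task is extracted from its scheduling level, queued on the
   mutex and the scheduler is invoked to choose another task. */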
static int NOPM_lock(RLEVEL l, mutex_t *m)
{
  NOPM_mutex_t *p;

  kern_cli();

  p = (NOPM_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, initialize it! */
    NOPM_mutexattr_t a;
    NOPM_mutexattr_default(a);
    NOPM_init(l, m, &a);
    p = (NOPM_mutex_t *)m->opt;  /* opt has just been set by NOPM_init() */
  }

  if (p->owner == exec_shadow) {
    /* the task already owns the mutex: just increase the nesting counter */
    p->counter++;
    kern_sti();
    return 0;
  }

  if (p->owner != NIL)  {           /* We must block exec task   */
       LEVEL l;            /* for readability only */
       TIME tx;            /* a dummy TIME for timespec operations */
       struct timespec ty; /* a dummy timespec for timespec operations */

       proc_table[exec_shadow].context = kern_context_save();
       /* SAME AS SCHEDULER... manage the capacity event and the load_info */
       ll_gettime(TIME_EXACT, &schedule_time);
       SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
       tx = TIMESPEC2USEC(&ty);
       proc_table[exec_shadow].avail_time -= tx;
       jet_update_slice(tx);
       if (cap_timer != NIL) {
         event_delete(cap_timer);
         cap_timer = NIL;
       }

       l = proc_table[exec_shadow].task_level;
       level_table[l]->task_extract(l,exec_shadow);

       /* we insert the task in the semaphore queue */
       proc_table[exec_shadow].status = NOPM_WAIT;
       qq_insertlast(exec_shadow,&p->blocked);

       /* and finally we reschedule */
       exec = exec_shadow = -1;
       scheduler();
       kern_context_load(proc_table[exec_shadow].context);
  }
  else {
    /* the mutex is free, we can lock it! */
    p->owner = exec_shadow;
    p->counter++;
    kern_sti();
  }

  return 0;
}

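/* trylock: as lock, but it never blocks; if the mutex is already owned
   (even by the calling task itself) EBUSY is returned. */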
static int NOPM_trylock(RLEVEL l, mutex_t *m)
{
  NOPM_mutex_t *p;

  kern_cli();

  p = (NOPM_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, initialize it! */
    NOPM_mutexattr_t a;
    NOPM_mutexattr_default(a);
    NOPM_init(l, m, &a);
    p = (NOPM_mutex_t *)m->opt;  /* opt has just been set by NOPM_init() */
  }

  if (p->owner != NIL)  {
    /* a task already owns the mutex */
    kern_sti();
    return (EBUSY);
  }
  else {
    /* the mutex is free, we can lock it! */
    p->owner = exec_shadow;
    p->counter++;
    kern_sti();
  }

  return 0;
}

static int NOPM_unlock(RLEVEL l, mutex_t *m)
{
  NOPM_mutex_t *p;
  PID e;

  p = (NOPM_mutex_t *)m->opt;
  if (!p)
    return (EINVAL);

  if (p->owner != exec_shadow) {
    /* the mutex is owned by another task!!! */
    kern_printf("wrongunlock<owner=%i,unlocker=%i>",p->owner,exec_shadow);
    kern_sti();
    return (EPERM);
  }

  p->counter--;
  if (p->counter!=0) {
    /* the owner still holds nested locks on this mutex */
    kern_sti();
    return 0;
  }

  proc_table[exec_shadow].context = kern_context_save();

  /* the mutex is mine, pop the first task to extract from the queue */
  for (;;) {
    e = qq_getfirst(&p->blocked);
    if (e == NIL) {
      p->owner = NIL;
      break;
    } else if (proc_table[e].status == NOPM_WAIT) {
      l = proc_table[e].task_level;
      level_table[l]->task_insert(l,e);
      p->counter++;
      break;
    }
  }

  /* MG: the task extracted from the queue (NIL if none) is the new owner */
  p->owner = e;

  scheduler();
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}

void NOPM_register_module(void)
{
  RLEVEL l;                    /* the level that we register */
  NOPM_mutex_resource_des *m;  /* for readability only */

  printk("NOPM_register_module\n");

  /* request an entry in the resource_table */
  l = resource_alloc_descriptor();

  /* alloc the space needed for the NOPM_mutex_resource_des */
  m = (NOPM_mutex_resource_des *)kern_alloc(sizeof(NOPM_mutex_resource_des));

  /* update the resource_table with the new entry */
  resource_table[l] = (resource_des *)m;

  /* fill the resource_des descriptor */
  strncpy(m->m.r.res_name, NOPM_MODULENAME, MAX_MODULENAME);
  m->m.r.res_code                    = NOPM_MODULE_CODE;
  m->m.r.res_version                 = NOPM_MODULE_VERSION;

  m->m.r.rtype                       = MUTEX_RTYPE;

  m->m.r.resource_status             = NOPM_resource_status;
  m->m.r.level_accept_resource_model = NOPM_level_accept_resource_model;
  m->m.r.res_register                = NOPM_res_register;

  m->m.r.res_detach                  = NOPM_res_detach;

  /* fill the mutex_resource_des descriptor */
  m->m.level_accept_mutexattr        = NOPM_level_accept_mutexattr;
  m->m.init                          = NOPM_init;
  m->m.destroy                       = NOPM_destroy;
  m->m.lock                          = NOPM_lock;
  m->m.trylock                       = NOPM_trylock;
  m->m.unlock                        = NOPM_unlock;

}