/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors     :
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
 *   Luca Abeni          <luca@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: sem.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $

 File:        $File$
 Revision:    $Revision: 1.1.1.1 $
 Last update: $Date: 2002-03-29 14:12:52 $
 ------------

 This file contains the Hartik 3.3.1 Semaphore functions

 Author:      Giuseppe Lipari

 Semaphores:
 This is the generalized version of the signal & wait primitives:
 the user can specify the amount by which the semaphore counter is
 incremented or decremented.  This is useful for buffer management
 (see the port section).

**/
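
/*
 * Usage sketch: one way the generalized primitives described above can be
 * used to manage a pool of buffer slots.  The two semaphores, the slot
 * counts and the task bodies below are only illustrative, and BLOCK is
 * assumed to be the blocking counterpart of the NON_BLOCK flag accepted by
 * sem_xwait().  The fragment is kept inside #if 0 so it is never compiled.
 */
#if 0
static sem_t free_slots, full_slots;

static void buffer_init_example(void)
{
  sem_init(&free_slots, 0, 16); /* 16 empty slots available at start */
  sem_init(&full_slots, 0, 0);  /* no filled slots yet               */
}

static void producer_example(void)
{
  /* reserve 4 empty slots at once; block until 4 are free */
  sem_xwait(&free_slots, 4, BLOCK);
  /* ... fill the 4 slots ... */
  /* publish the 4 filled slots to the consumer with a single signal */
  sem_xpost(&full_slots, 4);
}
#endif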

/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */


#include <stdarg.h>
#include <modules/sem.h>
#include <kernel/config.h>
#include <ll/ll.h>
#include <ll/string.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <kernel/trace.h>
#include <limits.h>
#include <fcntl.h>


/* Semaphore descriptor table */
static struct sem_des {
    char *name;     /* a name, for named semaphores */
    int index;      /* an index for sem_open, containing the sem number */
    int count;      /* the semaphore counter */
    QQUEUE blocked; /* the blocked processes queue */
    int next;       /* the next semaphore in the free semaphore queue */
    BYTE used;      /* 1 if the semaphore is used */
} sem_table[SEM_NSEMS_MAX];


/* this -IS- an extension to the proc_table!!! */
static struct {
    int decsem;   /* the value required in sem_xwait */
    int sem;      /* the semaphore on which the process is blocked */
} sp_table[MAX_PROC];

static QUEUE free_sem;         /* Queue of free semaphores             */



/*----------------------------------------------------------------------*/
/* Cancellation test for semaphores                                     */
/*----------------------------------------------------------------------*/

/* this is the test that is done when a task is being killed
   while it is blocked on a sem_wait */
static int semwait_cancellation_point(PID i, void *arg)
{
    LEVEL l;

    if (proc_table[i].status == WAIT_SEM) {
      /* the task that has to be killed is blocked on a sem_wait.
         We reset the data structures set in sem_wait, so that when the
         task returns from the sem_wait it will fall into a
         task_testcancel */

      /* extract the process from the semaphore queue... */
      qq_extract(i,&sem_table[ sp_table[i].sem ].blocked);

      l = proc_table[i].task_level;
      level_table[l]->task_insert(l,i);

      return 1;
    }

    return 0;
}

/*----------------------------------------------------------------------*/
/* Initialize the semaphore structures                                  */
/*----------------------------------------------------------------------*/
void SEM_register_module(void)
{
    int i;

    for (i = 0; i < SEM_NSEMS_MAX; i++) {
        sem_table[i].name = NULL;
        sem_table[i].index = i;
        sem_table[i].count = 0;
        qq_init(&sem_table[i].blocked);
        sem_table[i].next = i+1;
        sem_table[i].used = 0;
    }
    sem_table[SEM_NSEMS_MAX-1].next = NIL;
    free_sem = 0;

    register_cancellation_point(semwait_cancellation_point, NULL);
}

/*----------------------------------------------------------------------*/
/* Allocates a semaphore descriptor and sets its counter to value       */
/*----------------------------------------------------------------------*/

// the pshared parameter is NRQ for PSE52
int sem_init(sem_t *sem, int pshared, unsigned int value)
{
    if (value > SEM_VALUE_MAX) {
      errno = EINVAL;
      return -1;
    }

    kern_cli();
    *sem = free_sem;
    if (*sem != NIL) {
        free_sem = sem_table[*sem].next;
        sem_table[*sem].name = NULL;
        sem_table[*sem].count = value;
        qq_init(&sem_table[*sem].blocked);
        sem_table[*sem].used = 1;
    }
    else {
        errno = ENOSPC;
        kern_sti();
        return -1;
    }
    kern_sti();
    return 0;
}

/*----------------------------------------------------------------------*/
/* Frees a semaphore descriptor                                         */
/*----------------------------------------------------------------------*/
int sem_destroy(sem_t *sem)
{
    kern_cli();

    if (*sem < 0 || *sem >= SEM_NSEMS_MAX || !sem_table[*sem].used) {
      errno = EINVAL;
      kern_sti();
      return -1;
    }

    if (sem_table[*sem].blocked.first != NIL) {
      errno = EBUSY;
      kern_sti();
      return -1;
    }

    sem_table[*sem].used = 0;
    sem_table[*sem].next = free_sem;
    free_sem = *sem;

    kern_sti();
    return 0;
}

/*----------------------------------------------------------------------*/
/* Allocate a named semaphore                                           */
/*----------------------------------------------------------------------*/

// the pshared parameter is NRQ for PSE52
sem_t *sem_open(const char *name, int oflag, ...)
{
    int i, j;
    int found = 0;
    mode_t m;
    sem_t sem;

    kern_cli();

    for (i = 0; i < SEM_NSEMS_MAX; i++)
      if (sem_table[i].used && sem_table[i].name != NULL) {
        /* unnamed semaphores (from sem_init) have a NULL name */
        if (strcmp(name, sem_table[i].name) == 0) {
          found = 1;
          break;
        }
      }
    if (found) {
      if (oflag == (O_CREAT | O_EXCL)) {
          errno = EEXIST;
          kern_sti();
          return SEM_FAILED;
      } else {
          kern_sti();
          return &sem_table[i].index;
      }
    } else {
      if (!(oflag & O_CREAT)) {
          errno = ENOENT;
          kern_sti();
          return SEM_FAILED;
      } else {
          va_list l;

          va_start(l, oflag);
            m = va_arg(l,mode_t);
            j = va_arg(l, int);
          va_end(l);

          if (j > SEM_VALUE_MAX) {
            errno = EINVAL;
            kern_sti();
            return SEM_FAILED;
          }

          sem = free_sem;
          if (sem != -1) {
            free_sem = sem_table[sem].next;
            sem_table[sem].name = kern_alloc(strlen((char *)name)+1);
            strcpy(sem_table[sem].name, (char *)name);
            sem_table[sem].count = j;
            qq_init(&sem_table[sem].blocked);
            sem_table[sem].used = 1;
            kern_sti();
            return &sem_table[sem].index;
          }
          else {
            errno = ENOSPC;
            kern_sti();
            return SEM_FAILED;
          }
      }
    }
}
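
/*
 * Usage sketch for the named-semaphore interface above: create (or attach
 * to) a semaphore called "mysem", use it, then close and unlink it.  The
 * name, the mode argument and the initial value are illustrative only, and
 * error handling is reduced to a bare check against SEM_FAILED.  Kept
 * inside #if 0 so it is never compiled.
 */
#if 0
static void named_sem_example(void)
{
  sem_t *s;

  /* create the semaphore if it does not exist yet; initial value 1 */
  s = sem_open("mysem", O_CREAT, (mode_t)0, 1);
  if (s == SEM_FAILED)
    return;

  sem_wait(s);
  /* ... critical section ... */
  sem_post(s);

  sem_close(s);
  sem_unlink("mysem");
}
#endif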

/*----------------------------------------------------------------------*/
/* Closes a named semaphore                                             */
/*----------------------------------------------------------------------*/
int sem_close(sem_t *sem)
{
    kern_cli();

    if (*sem < 0 || *sem >= SEM_NSEMS_MAX || !sem_table[*sem].used) {
      errno = EINVAL;
      kern_sti();
      return -1;
    }

/*  why not???
    if (sem_table[*sem].q_first != -1) {
      errno = EBUSY;
      kern_sti();
      return -1;
    } */

    kern_free(sem_table[*sem].name,strlen(sem_table[*sem].name)+1);
    sem_table[*sem].used = 0;
    sem_table[*sem].next = free_sem;
    free_sem = *sem;

    kern_sti();
    return 0;
}

/*----------------------------------------------------------------------*/
/* Unlink a named semaphore                                             */
/*----------------------------------------------------------------------*/
int sem_unlink(const char *name)
{
    int i;
    int found = 0;

    kern_cli();

    for (i = 0; i < SEM_NSEMS_MAX; i++)
      if (sem_table[i].used && sem_table[i].name != NULL) {
        /* unnamed semaphores (from sem_init) have a NULL name */
        if (strcmp(name, sem_table[i].name) == 0) {
          found = 1;
          break;  /* keep i pointing at the matching entry */
        }
      }

    if (found) {
      kern_free(sem_table[i].name,strlen((char *)name)+1);
      sem_table[i].used = 0;
      sem_table[i].next = free_sem;
      free_sem = i;
      kern_sti();
      return 0;
    } else {
      errno = ENOENT;
      kern_sti();
      return -1;
    }
}

/*----------------------------------------------------------------------*/
/* Generic wait. If it is possible, decrements the sem counter by 1,    */
/* else blocks the task.                                                */
/*----------------------------------------------------------------------*/
int sem_wait(sem_t *s)
{
    struct sem_des *s1; /* It speeds up access */

    if (*s < 0 || *s >= SEM_NSEMS_MAX || !sem_table[*s].used) {
      errno = EINVAL;
      return -1;
    }

    task_testcancel();

    proc_table[exec_shadow].context = kern_context_save();

    s1 = &sem_table[*s];

    if (s1->blocked.first != NIL || s1->count == 0)  {
            /* We must block exec task   */
            LEVEL l;            /* for readability only */
            TIME tx;            /* a dummy TIME for timespec operations */
            struct timespec ty; /* a dummy timespec for timespec operations */

            /* tracer stuff */
            trc_logevent(TRC_SEM_WAIT,s);

            /* SAME AS SCHEDULER... manage the capacity event and the load_info */
            ll_gettime(TIME_EXACT, &schedule_time);
            SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
            tx = TIMESPEC2USEC(&ty);
            proc_table[exec_shadow].avail_time -= tx;
            jet_update_slice(tx);
            if (cap_timer != NIL) {
              event_delete(cap_timer);
              cap_timer = NIL;
            }

            l = proc_table[exec_shadow].task_level;
            level_table[l]->task_extract(l,exec_shadow);

            /* we insert the task in the semaphore queue */
            proc_table[exec_shadow].status = WAIT_SEM;

            /* Prepare sem_table des... */
            sp_table[exec_shadow].decsem = 1;
            sp_table[exec_shadow].sem = *s;

            /* ...and put it in sem queue */
            qq_insertlast(exec_shadow,&s1->blocked);

            /* and finally we reschedule */
            exec = exec_shadow = -1;
            scheduler();
            kern_context_load(proc_table[exec_shadow].context);

            /* sem_wait is a cancellation point... */
            task_testcancel();
    }
    else {
            s1->count--;
            /* tracer stuff */
            trc_logevent(TRC_SEM_WAIT,s);
            kern_context_load(proc_table[exec_shadow].context);
    }

    return 0;
}

/*----------------------------------------------------------------------*/
/* Non-blocking wait                                                    */
/*----------------------------------------------------------------------*/
int sem_trywait(sem_t *s)
{
    struct sem_des *s1; /* It speeds up access */

    if (*s < 0 || *s >= SEM_NSEMS_MAX || !sem_table[*s].used) {
      errno = EINVAL;
      return -1;
    }

    kern_cli();

    s1 = &sem_table[*s];

    /* tracer stuff */
    //trc_logevent(TRC_SEM_WAITNB,s);

    if (s1->blocked.first != NIL || s1->count == 0)  {
      errno = EAGAIN;
      kern_sti();
      return -1;
    }
    else
      s1->count--;

    kern_sti();
    return 0;
}
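
/*
 * Usage sketch for the non-blocking wait above: try to take the semaphore
 * and fall back to other work when it is busy.  The semaphore argument and
 * the do_other_work() helper are hypothetical; on failure sem_trywait()
 * returns -1 with errno set to EAGAIN.  Kept inside #if 0 so it is never
 * compiled.
 */
#if 0
static void poll_example(sem_t *s)
{
  if (sem_trywait(s) == 0) {
    /* semaphore taken without blocking */
    /* ... use the protected resource ... */
    sem_post(s);
  } else if (errno == EAGAIN) {
    /* semaphore busy: do something else instead of blocking */
    do_other_work();
  }
}
#endif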


/*----------------------------------------------------------------------*/
/* Generic wait. If it is possible, decrements the sem counter by n,    */
/* else blocks the task.                                                */
/*----------------------------------------------------------------------*/
int sem_xwait(sem_t *s, int n, int wait)
{
    struct sem_des *s1; /* It speeds up access */

    if (*s < 0 || *s >= SEM_NSEMS_MAX || !sem_table[*s].used) {
      errno = EINVAL;
      return -1;
    }

    /* We do not need to save context if we are sure we shall not block! */
    if (wait == NON_BLOCK)
      kern_cli();
    else
      proc_table[exec_shadow].context = kern_context_save();

    s1 = &sem_table[*s];

    /* The non-blocking wait is really simple! */
    /* We do not suspend or schedule anything  */
    if (wait == NON_BLOCK) {
      /* tracer */
      //trc_logevent(TRC_SEM_WAITNB,s);

      if (s1->blocked.first != NIL || s1->count < n)  {
        errno = EAGAIN;
        kern_sti();
        return -1;
      }
      else
        s1->count -= n;

      kern_sti();
      return 0;
    }
    /* The blocking wait is more complex... */
    else {
        /* the blocking wait is a cancellation point */
        task_testcancel();

        if (s1->blocked.first != NIL || s1->count < n)  {
                    /* We must block exec task   */
                    LEVEL l;            /* for readability only */
                    TIME tx;            /* a dummy TIME for timespec operations */
                    struct timespec ty; /* a dummy timespec for timespec operations */

                    /* tracer */
                    trc_logevent(TRC_SEM_WAIT,s);

                    /* SAME AS SCHEDULER... manage the capacity event and the load_info */
                    ll_gettime(TIME_EXACT, &schedule_time);
                    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
                    tx = TIMESPEC2USEC(&ty);
                    proc_table[exec_shadow].avail_time -= tx;
                    jet_update_slice(tx);
                    if (cap_timer != NIL) {
                      event_delete(cap_timer);
                      cap_timer = NIL;
                    }

                    l = proc_table[exec_shadow].task_level;
                    level_table[l]->task_extract(l,exec_shadow);

                    /* we insert the task in the semaphore queue */
                    proc_table[exec_shadow].status = WAIT_SEM;

                    /* Prepare sem_table des... */
                    sp_table[exec_shadow].decsem = n;
                    sp_table[exec_shadow].sem = *s;

                    /* ...and put it in sem queue */
                    qq_insertlast(exec_shadow,&s1->blocked);

                    /* and finally we reschedule */
                    exec = exec_shadow = -1;
                    scheduler();
                    kern_context_load(proc_table[exec_shadow].context);

                    /* sem_xwait is a cancellation point... */
                    task_testcancel();
            }
            else {
                    s1->count -= n;
                    /* tracer */
                    trc_logevent(TRC_SEM_WAIT,s);
                    kern_context_load(proc_table[exec_shadow].context);
            }
    }

    return 0;
}

/*----------------------------------------------------------------------*/
/* Generic signal. It increments the sem counter by 1, and wakes one    */
/* of the tasks that are blocked on the semaphore, if it is possible.   */
/* The semaphore queue is a FIFO queue, in order to avoid starvation.   */
/*----------------------------------------------------------------------*/
int sem_post(sem_t *s)
{
    struct sem_des *s1;        /* it speeds up access          */
    int p;                     /* idem                         */
    LEVEL l;

    if (*s < 0 || *s >= SEM_NSEMS_MAX || !sem_table[*s].used) {
      errno = EINVAL;
      return -1;
    }

    // ugly patch that allows sem_post to be called from an interrupt handler!!!
    if (ll_ActiveInt()) {
      SYS_FLAGS f;
      f = kern_fsave();
      s1 = &sem_table[*s];
      s1->count ++;              /* inc sem count                */

      p = s1->blocked.first;
      if (p != NIL && sp_table[p].decsem <= s1->count) {
        /* Dec sem count */
        s1->count -= sp_table[p].decsem;

        /* Get task from blocked queue */
        qq_extract(p,&s1->blocked);

        l = proc_table[p].task_level;
        level_table[l]->task_insert(l,p);
        /* only one task can be awakened */
        /* Preempt if necessary */
        event_need_reschedule();
      }

      /* tracer */
      trc_logevent(TRC_SEM_SIGNAL,s);
      kern_frestore(f);
    }
    else {
      proc_table[exec].context = kern_context_save();

      s1 = &sem_table[*s];
      s1->count ++;              /* inc sem count                */

      p = s1->blocked.first;
      if (p != NIL && sp_table[p].decsem <= s1->count) {
        /* Dec sem count */
        s1->count -= sp_table[p].decsem;

        /* Get task from blocked queue */
        qq_extract(p,&s1->blocked);

        l = proc_table[p].task_level;
        level_table[l]->task_insert(l,p);
        /* only one task can be awakened */
        /* Preempt if necessary */
        scheduler();
      }

      /* tracer */
      trc_logevent(TRC_SEM_SIGNAL,s);

      kern_context_load(proc_table[exec_shadow].context);
    }

    return 0;
}
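
/*
 * Usage sketch for the interrupt-context path above (the ll_ActiveInt()
 * branch): an interrupt handler signals a task blocked in sem_wait().  The
 * handler name, its signature and the data_ready semaphore are illustrative
 * only; the point is that sem_post() may be issued from interrupt context,
 * where it defers preemption through event_need_reschedule().  Kept inside
 * #if 0 so it is never compiled.
 */
#if 0
static sem_t data_ready;

static void device_irq_handler_example(void)
{
  /* acknowledge the device here, then wake the waiting task */
  sem_post(&data_ready);
}
#endif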

/*----------------------------------------------------------------------*/
/* Generic signal. It increments the sem counter by n, and wakes as many*/
/* blocked tasks as the new counter value allows. The semaphore queue is*/
/* a FIFO queue, in order to avoid starvation.                          */
/*----------------------------------------------------------------------*/
int sem_xpost(sem_t *s, int n)
{
    struct sem_des *s1;        /* it speeds up access          */
    int p;                     /* idem                         */
    int fl = 0;                /* a flag                       */
    LEVEL l;

    if (*s < 0 || *s >= SEM_NSEMS_MAX || !sem_table[*s].used) {
      errno = EINVAL;
      return -1;
    }

    // ugly patch that allows sem_xpost to be called from an interrupt handler!!!
    if (ll_ActiveInt()) {
      SYS_FLAGS f;
      f = kern_fsave();
      s1 = &sem_table[*s];
      s1->count += n;                     /* inc sem count                */

      p = s1->blocked.first;
      while (p != NIL && sp_table[p].decsem <= s1->count) {
        /* Dec sem count */
        s1->count -= sp_table[p].decsem;

        /* Get task from blocked queue */
        qq_extract(p,&s1->blocked);

        l = proc_table[p].task_level;
        level_table[l]->task_insert(l,p);

        /* Next task to wake            */
        p = s1->blocked.first;

        fl = 1;
      }

      /* tracer */
      trc_logevent(TRC_SEM_SIGNAL,s);

      /* Preempt if necessary */
      if (fl) event_need_reschedule();
      kern_frestore(f);
    }
    else {
      proc_table[exec].context = kern_context_save();

      s1 = &sem_table[*s];
      s1->count += n;                     /* inc sem count                */

      p = s1->blocked.first;
      while (p != NIL && sp_table[p].decsem <= s1->count) {
        /* Dec sem count */
        s1->count -= sp_table[p].decsem;

        /* Get task from blocked queue */
        qq_extract(p,&s1->blocked);

        l = proc_table[p].task_level;
        level_table[l]->task_insert(l,p);

        /* Next task to wake            */
        p = s1->blocked.first;

        fl = 1;
      }

      /* tracer */
      trc_logevent(TRC_SEM_SIGNAL,s);

      /* Preempt if necessary */
      if (fl) scheduler();

      kern_context_load(proc_table[exec_shadow].context);
    }

    return 0;
}

/*----------------------------------------------------------------------*/
/* Getvalue returns the value of the semaphore (>=0). If some processes */
/* are blocked on the semaphore, it returns the number of blocked       */
/* processes, negated (<0)                                              */
/*----------------------------------------------------------------------*/
int sem_getvalue(sem_t *sem, int *sval)
{
    PID p;

    if (*sem < 0 || *sem >= SEM_NSEMS_MAX || !sem_table[*sem].used) {
      errno = EINVAL;
      return -1;
    }

    kern_cli();

    if (sem_table[*sem].blocked.first == NIL)
      /* the sem is free */
      *sval = sem_table[*sem].count;
    else {
      /* the sem is busy */
      *sval = 0;
      p = sem_table[*sem].blocked.first;
      do {
        (*sval)--;
        p = proc_table[p].next;
      } while (p != NIL);
    }

    kern_sti();
    return 0;
}
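
/*
 * Usage sketch for sem_getvalue() above: the sign of the value returned in
 * *sval tells whether tasks are blocked.  The semaphore argument is
 * illustrative only.  Kept inside #if 0 so it is never compiled.
 */
#if 0
static void getvalue_example(sem_t *s)
{
  int v;

  if (sem_getvalue(s, &v) == 0) {
    if (v >= 0) {
      /* v is the current counter: v units can still be taken */
    } else {
      /* -v tasks are currently blocked on the semaphore */
    }
  }
}
#endif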


/*----------------------------------------------------------------------*/
/* this function returns 1 if the task is blocked on a semaphore        */
/*----------------------------------------------------------------------*/
int isBlocked(PID i)
{
    if (proc_table[i].status == WAIT_SEM) return 1;
    else return 0;
}