
#include "hlp.h"

#include <ll/ll.h>
#include <ll/string.h>
#include <ll/stdio.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>

#include <tracer.h>
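
/*
 * Overview (derived from the code below): this module implements a
 * resource access protocol based on preemption levels (HLP).  The
 * level descriptor (HLP_mutex_resource_des) keeps the list of all
 * registered tasks, ordered by decreasing preemption level, plus the
 * list of all created mutexes; each mutex (HLP_mutex_t) keeps its own
 * list of the tasks that declared they will use it.  Blocking is
 * realized through the shadow field of the task descriptor: when a
 * task locks a mutex, the shadows of the higher preemption level
 * tasks that use the same mutex are redirected to the locking task,
 * and they are recomputed at unlock time.
 */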

/* Just for debugging */
static void print_shadow(char *label, HLP_mutex_resource_des *l)
{
  HLP_tasklist_t *x = NULL;

  kern_printf("HLP module: %s:( ", label);
  for (x = l->tasklist; x != NULL; x = x->next)
    {
      kern_printf("(%d, %d) ", x->pid, proc_table[x->pid].shadow);
    }
  kern_printf(" )\n");
}

/* -----------------------------------------------------------------------
   LISTS HANDLING
   ----------------------------------------------------------------------- */


/* insert the task in the list, kept ordered by decreasing preemption level */
static void HLP_insert_tasklist(HLP_tasklist_t **list, PID pid, DWORD preempt)
{
  HLP_tasklist_t *tmp = NULL, *scan = NULL, *prev = NULL;

  tmp = (HLP_tasklist_t *)kern_alloc(sizeof(HLP_tasklist_t));
  tmp->pid = pid;
  tmp->preempt = preempt;
  tmp->prev = NULL;
  tmp->next = NULL;

  scan = *list;

  while (scan && scan->preempt > preempt)
    {
      prev = scan;
      scan = scan->next;
    }

  if (prev == NULL) /* head position (empty list or highest preemption level) */
    {
      tmp->next = scan;
      tmp->prev = NULL;
      if (scan != NULL)
        scan->prev = tmp;
      *list = tmp;
    }
  else
    if (scan == NULL) /* last position */
      {
        tmp->next = NULL;
        tmp->prev = prev;
        prev->next = tmp;
      }
    else /* between prev and scan */
      {
        tmp->next = scan;
        tmp->prev = prev;
        prev->next = tmp;
        scan->prev = tmp;
      }
}

static void HLP_extract_tasklist(HLP_tasklist_t **list, PID pid)
{
  HLP_tasklist_t *scan = *list;

  while (scan && scan->pid != pid)
      scan = scan->next;

  if (scan != NULL)
    {
      if (scan->prev != NULL)
        scan->prev->next = scan->next;
      else
        *list = scan->next;
      if (scan->next != NULL)
        scan->next->prev = scan->prev;
      kern_free(scan, sizeof(HLP_tasklist_t));
    }
}

HLP_tasklist_t *HLP_tasklist_with_pid(HLP_tasklist_t *list, PID p)
{
  HLP_tasklist_t *ret = list;
 
  while (ret && ret->pid != p)
    ret = ret->next;

  return ret;
}

/* -----------------------------------------------------------------------
   End of LISTS HANDLING
   ----------------------------------------------------------------------- */


/**
 * HLP_res_register
 *
 * Called when a task_create is made and when HLP_usemutex is used
 */
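
/*
 * Two resource classes are accepted (see the body below): an
 * HLP_RES_MODEL (HLP_RCLASS) registers task p together with its
 * preemption level, while an HLP_mutex_t passed with HLP2_RCLASS
 * binds an already registered task to a mutex it declares to use.
 */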


static int HLP_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  HLP_mutex_resource_des *m = (HLP_mutex_resource_des *)(resource_table[l]);
  HLP_tasklist_t *tasklist = NULL;

  if (r->level && r->level != l)
    return -1;

  if (r->rclass == HLP_RCLASS) /* register task */
    {
      HLP_RES_MODEL *hlp = (HLP_RES_MODEL *)r;

      HLP_insert_tasklist(&(m->tasklist), p, hlp->preempt); /* insert into the HLP module task list */

      return 0;
    }
  else if (r->rclass == HLP2_RCLASS)     /* a mutex passed via HLP_useres() */
    {
      HLP_mutex_t *mut = (HLP_mutex_t *)r;

      /* register mutex for task p */
      tasklist = HLP_tasklist_with_pid(m->tasklist, p);
     
      if (tasklist == NULL)
        return -1;
     
      mut->next = tasklist->mutexlist;
      tasklist->mutexlist = mut;

      /* register task for mutex */
      HLP_insert_tasklist(&(mut->tasklist), tasklist->pid, tasklist->preempt);

      kern_printf("HLP module: process %d uses mutex %p\n", tasklist->pid, mut);
      return 0;
    }
  else
    return -1;
}


/* called when task_kill is made */
static void HLP_res_detach(RLEVEL l, PID p)
{
  HLP_mutex_resource_des *m = (HLP_mutex_resource_des *)(resource_table[l]);
  HLP_mutex_t *mut;
  HLP_tasklist_t *cur;

  cur = HLP_tasklist_with_pid(m->tasklist, p);

  if (cur == NULL) /* the task never registered with this module */
    return;

  for (mut = cur->mutexlist; mut; mut = mut->next)
    if (mut->owner == p)
      kern_raise(XMUTEX_OWNER_KILLED, p);

  for (mut = m->mutexlist; mut; mut = mut->next)
    HLP_extract_tasklist(&(mut->tasklist), p);

  /* remove the task from the tasklist */
  HLP_extract_tasklist(&(m->tasklist), p);
}

/* called when a mutex_init on a HLP is made */
static int HLP_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  HLP_mutex_resource_des *lev = (HLP_mutex_resource_des *)(resource_table[l]);
  HLP_mutex_t *mut;


  if (a->mclass != HLP_MCLASS)
    return -1;

  mut = (HLP_mutex_t *) kern_alloc(sizeof(HLP_mutex_t));

  /* check that there is enough memory; no check is made for an init
     on a non-destroyed mutex */
  if (!mut)
    return (ENOMEM);

  res_default_model(mut->r, HLP2_RCLASS);

  mut->owner = NIL;
  mut->tasklist = NULL;
  mut->next = lev->mutexlist;
 
  lev->mutexlist = mut;

  m->mutexlevel = l;
  m->opt = (void *)mut;

  return 0;
}

static int HLP_destroy(RLEVEL l, mutex_t *m)
{
  HLP_mutex_resource_des *lev = (HLP_mutex_resource_des *)(resource_table[l]);
  HLP_mutex_t *mut;
  SYS_FLAGS f;

  f = kern_fsave();

  mut = m->opt;

  if (mut->owner != NIL)
    {
      kern_frestore(f);
      return (EBUSY);
    }

  /* extraction from the list of system mutexes is not implemented yet */
  lev = lev;   /* avoid the "unused variable" warning */
  //  HLP_extract_hlplist(lev, mut);

  if (m->opt) {
    kern_free(m->opt,sizeof(HLP_mutex_t));
    m->opt = NULL;
  }
  kern_frestore(f);

  return 0;
}

/* mutex_lock */
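/*
 * Under this protocol a lock never blocks: the shadow redirection
 * performed below is meant to keep a task from running while a mutex
 * it uses is held by someone else, so finding the mutex already
 * locked, or being locked by a task that never registered it, is
 * treated as a protocol violation (XHLP_INVALID_LOCK) rather than a
 * reason to suspend the caller.
 */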
static int HLP_lock(RLEVEL l, mutex_t *m)
{
  HLP_mutex_resource_des *lev = (HLP_mutex_resource_des *)(resource_table[l]);
  HLP_mutex_t *mut;
  HLP_tasklist_t *current, *taskscan;

  SYS_FLAGS f;

  f = kern_fsave();

  mut = (HLP_mutex_t *)m->opt;

  if (!mut) {
    /* if the mutex is not initialized */
    kern_frestore(f);
    return (EINVAL);
  }

  if (mut->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_frestore(f);
    return (EDEADLK);
  }

  current = HLP_tasklist_with_pid(mut->tasklist, exec_shadow);

  if (current == NULL ||  /* if the task does not use this mutex */
      mut->owner != NIL)  /* or the mutex is already locked */
  {
    kern_raise(XHLP_INVALID_LOCK, exec_shadow);
    kern_frestore(f);
    return (EINVAL);
  }

  /* at this point we know that:
     - the task uses the mutex it wants to lock
     - the mutex is free
     => the task can lock the mutex now
  */


  mut->owner = exec_shadow;

  print_shadow("prelock", lev);

  /* the shadows of the tasks that use this mutex (and have a higher
     preemption level) must now point to exec_shadow */

  for (taskscan = current->prev; taskscan != NULL; taskscan = taskscan->prev)
    proc_table[taskscan->pid].shadow = exec_shadow;

  print_shadow("postlock", lev);

  kern_frestore(f);

  return 0;
}

/* HLP_trylock is equal to HLP_lock because HLP_lock does not block! */
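
/*
 * mutex_unlock: after the mutex is released, the shadows set by
 * HLP_lock have to be recomputed.  For every task with a higher
 * preemption level whose shadow was redirected to the running task,
 * the shadow is first reset to the task itself and then, scanning the
 * mutexes that task uses, redirected again to the owner of any locked
 * mutex whose owner's preemption level is not greater than its own.
 */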

static int HLP_unlock(RLEVEL l, mutex_t *m)
{
  HLP_mutex_resource_des *lev;
  HLP_mutex_t *mut, *mutscan;
  HLP_tasklist_t *current, *taskscan, *taskowner;

  lev = (HLP_mutex_resource_des *)(resource_table[l]);
  mut = (HLP_mutex_t *)m->opt;

  if (!mut)
    {
      kern_sti();
      return (EINVAL);
    }

  if (mut->owner != exec_shadow) {
    /* the mutex is owned by another task!!! */
    kern_sti();
    return (EPERM);
  }

  proc_table[exec_shadow].context = kern_context_save();

  mut->owner = NIL;

  /* recalculate the shadow of the tasks with a higher preemption
     level whose shadow points to the current task */


  current = HLP_tasklist_with_pid(lev->tasklist, exec_shadow);
 
  print_shadow("preunlock", lev);

  /* loop over the tasks with a higher preemption level */
  for (taskscan = current->prev; taskscan; taskscan = taskscan->prev)
    {
      /* task affected by HLP: its shadow has to be recomputed */
      if (proc_table[taskscan->pid].shadow == exec_shadow &&
          taskscan->pid != current->pid)
        {
          proc_table[taskscan->pid].shadow = taskscan->pid;
          for (mutscan = taskscan->mutexlist; mutscan; mutscan = mutscan->next)
            {
              if (mutscan->owner != NIL)
                {
                  taskowner = HLP_tasklist_with_pid(mutscan->tasklist, mutscan->owner);
                  if (taskowner->preempt <= taskscan->preempt)
                    proc_table[taskscan->pid].shadow = mutscan->owner;
                }
            }
        }
    }
 
  print_shadow("postunlock", lev);

  scheduler();
  TRACER_LOGEVENT(FTrace_EVT_inheritance,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}
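
/*
 * Module registration.  A usage sketch (the start-up details are
 * assumptions, only the calls mentioned here appear in this file):
 * the module is registered once at system initialization with
 *
 *     RLEVEL hlp_level = HLP_register_module();
 *
 * after that, every task that will use HLP mutexes has to be attached
 * to the module through an HLP_RES_MODEL carrying its preemption
 * level (handled by HLP_res_register), and every mutex has to be
 * created with a mutexattr_t whose mclass is HLP_MCLASS, otherwise
 * HLP_init refuses it.
 */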

RLEVEL HLP_register_module(void)
{
  RLEVEL l;                  /* the level that we register */
  HLP_mutex_resource_des *m;  /* for readability only */

  printk("HLP_register_module\n");

  /* request an entry in the level_table */
  l = resource_alloc_descriptor();

  /* alloc the space needed for the HLP_mutex_resource_des */
  m = (HLP_mutex_resource_des *)kern_alloc(sizeof(HLP_mutex_resource_des));

  /* update the level_table with the new entry */
  resource_table[l] = (resource_des *)m;

  /* fill the resource_des descriptor */
  m->m.r.rtype                       = MUTEX_RTYPE;
  m->m.r.res_register                = HLP_res_register;
  m->m.r.res_detach                  = HLP_res_detach;

  /* fill the mutex_resource_des descriptor */
  m->m.init                          = HLP_init;
  m->m.destroy                       = HLP_destroy;
  m->m.lock                          = HLP_lock;
  m->m.trylock                       = HLP_lock;  /* trylock is the same as lock: HLP_lock never blocks */
  m->m.unlock                        = HLP_unlock;

  /* fill the HLP_mutex_resource_des descriptor */
  m->tasklist = NULL;
  m->mutexlist = NULL;

  return l;
}