#ifndef __LINUX_SMPLOCK_H
#define __LINUX_SMPLOCK_H

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)

extern spinlock_t kernel_flag;

#define kernel_locked()         (current->lock_depth >= 0)

#define get_kernel_lock()       spin_lock(&kernel_flag)
#define put_kernel_lock()       spin_unlock(&kernel_flag)

/*
 * Release the global kernel lock.
 */
static inline void release_kernel_lock(struct task_struct *task)
{
        if (unlikely(task->lock_depth >= 0))
                put_kernel_lock();
}

/*
 * Re-acquire the kernel lock.
 */
static inline void reacquire_kernel_lock(struct task_struct *task)
{
        if (unlikely(task->lock_depth >= 0))
                get_kernel_lock();
}

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only
 * need to worry about other CPUs.
 */
static inline void lock_kernel(void)
{
        int depth = current->lock_depth+1;
        if (likely(!depth))
                get_kernel_lock();
        current->lock_depth = depth;
}

/*
 * Release one level of the big kernel lock; the spinlock
 * itself is dropped only when the outermost level is
 * released (lock_depth goes back to -1).
 */
static inline void unlock_kernel(void)
{
        if (unlikely(current->lock_depth < 0))
                BUG();
        if (likely(--current->lock_depth < 0))
                put_kernel_lock();
}

#else

#define lock_kernel()                           do { } while(0)
#define unlock_kernel()                         do { } while(0)
#define release_kernel_lock(task)               do { } while(0)
#define reacquire_kernel_lock(task)             do { } while(0)
#define kernel_locked()                         1

#endif /* CONFIG_SMP || CONFIG_PREEMPT */
#endif /* __LINUX_SMPLOCK_H */
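
/*
 * Illustrative sketch, not part of the original header: because
 * lock_depth starts at -1 and is counted per task, lock_kernel()
 * nests; only the outermost unlock_kernel() drops kernel_flag.
 * The function below is hypothetical and only shows the calling
 * pattern.
 */
#if 0
static void example_nested_bkl_user(void)
{
        lock_kernel();          /* lock_depth -1 -> 0, kernel_flag taken   */
        lock_kernel();          /* lock_depth  0 -> 1, no spin_lock needed */

        /* ... code that relies on the BKL being held ... */

        unlock_kernel();        /* lock_depth  1 -> 0, lock still held     */
        unlock_kernel();        /* lock_depth  0 -> -1, kernel_flag freed  */
}
#endif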
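
/*
 * Illustrative sketch, not part of the original header: a heavily
 * simplified, hypothetical context-switch path showing what
 * release_kernel_lock()/reacquire_kernel_lock() are for.  The
 * spinlock is dropped for the task being switched out without
 * touching its lock_depth, so the lock can be re-taken later for
 * a task that still logically holds the BKL.
 */
#if 0
static void example_context_switch(struct task_struct *prev,
                                   struct task_struct *next)
{
        release_kernel_lock(prev);      /* drop kernel_flag, keep prev->lock_depth */

        /* ... switch from prev to next ... */

        reacquire_kernel_lock(next);    /* re-take kernel_flag if next holds the BKL */
}
#endif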