/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

/**
 * struct rcu_head - callback structure for use with RCU
 * @list: list_head to queue the update requests
 * @func: actual update function to call after the grace period.
 * @arg: argument to be passed to the actual update function.
 */
struct rcu_head {
	struct list_head list;
	void (*func)(void *obj);
	void *arg;
};

#define RCU_HEAD_INIT(head) \
	{ list: LIST_HEAD_INIT(head.list), func: NULL, arg: NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT(head)
#define INIT_RCU_HEAD(ptr) do { \
	INIT_LIST_HEAD(&(ptr)->list); (ptr)->func = NULL; (ptr)->arg = NULL; \
} while (0)

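/*
 * Usage sketch (illustrative only, not part of the original header; the
 * struct and variable names are hypothetical): an rcu_head is typically
 * embedded in the structure it protects and initialized before use, either
 * dynamically with INIT_RCU_HEAD() or statically with RCU_HEAD().
 *
 *	struct my_item {
 *		struct list_head link;
 *		struct rcu_head rcu;
 *	};
 *
 *	struct my_item *item = kmalloc(sizeof(*item), GFP_KERNEL);
 *	INIT_RCU_HEAD(&item->rcu);
 *
 *	RCU_HEAD(static_head);		(file-scope, statically initialized)
 */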
/* Control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	spinlock_t	mutex;		/* Guard this struct                 */
	long		curbatch;	/* Current batch number.             */
	long		maxbatch;	/* Max requested batch number.       */
	cpumask_t	rcu_cpu_mask;	/* CPUs that need to switch in order */
					/* for current batch to proceed.     */
};

/* Is batch a before batch b ? */
static inline int rcu_batch_before(long a, long b)
{
	return (a - b) < 0;
}

/* Is batch a after batch b ? */
static inline int rcu_batch_after(long a, long b)
{
	return (a - b) > 0;
}

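/*
 * Illustrative note (not part of the original header): comparing batch
 * numbers through a signed difference rather than "a < b" keeps the
 * ordering correct across counter wraparound on the two's-complement
 * arithmetic the kernel relies on.  For example, the batch numbered
 * LONG_MAX is still reported as being before the batch numbered LONG_MIN
 * that follows it once the counter wraps:
 *
 *	rcu_batch_before(LONG_MAX, LONG_MIN);	returns 1
 *	rcu_batch_after(LONG_MIN, LONG_MAX);	returns 1
 */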
/*
 * Per-CPU data for Read-Copy Update.
 * nxtlist - new callbacks are added here
 * curlist - current batch for which quiescent cycle started if any
 */
struct rcu_data {
	long		qsctr;		/* User-mode/idle loop etc. */
	long		last_qsctr;	/* value of qsctr at beginning */
					/* of rcu grace period */
	long		batch;		/* Batch # for current RCU batch */
	struct list_head nxtlist;
	struct list_head curlist;
};

DECLARE_PER_CPU(struct rcu_data, rcu_data);
extern struct rcu_ctrlblk rcu_ctrlblk;

#define RCU_qsctr(cpu)		(per_cpu(rcu_data, (cpu)).qsctr)
#define RCU_last_qsctr(cpu)	(per_cpu(rcu_data, (cpu)).last_qsctr)
#define RCU_batch(cpu)		(per_cpu(rcu_data, (cpu)).batch)
#define RCU_nxtlist(cpu)	(per_cpu(rcu_data, (cpu)).nxtlist)
#define RCU_curlist(cpu)	(per_cpu(rcu_data, (cpu)).curlist)

#define RCU_QSCTR_INVALID	0

static inline int rcu_pending(int cpu)
{
	/*
	if ((!list_empty(&RCU_curlist(cpu)) &&
	     rcu_batch_before(RCU_batch(cpu), rcu_ctrlblk.curbatch)) ||
	    (list_empty(&RCU_curlist(cpu)) &&
	     !list_empty(&RCU_nxtlist(cpu))) ||
	    cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask))
		return 1;
	else
		return 0;
	*/
	return 1;
}

#define rcu_read_lock()		preempt_disable()
#define rcu_read_unlock()	preempt_enable()

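/*
 * Reader-side sketch (illustrative only, not part of the original header;
 * "global_ptr" and "do_something_with" are hypothetical names): readers
 * bracket their accesses with rcu_read_lock()/rcu_read_unlock(), which in
 * this implementation simply disable and re-enable preemption.
 *
 *	struct my_item *p;
 *
 *	rcu_read_lock();
 *	p = global_ptr;
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock();
 */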
extern void rcu_init(void);
extern void rcu_check_callbacks(int cpu, int user);

/* Exported interfaces */
extern void FASTCALL(call_rcu(struct rcu_head *head,
			void (*func)(void *arg), void *arg));
extern void synchronize_kernel(void);

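/*
 * Update-side sketch (illustrative only, not part of the original header;
 * "global_ptr", "old", "new" and "free_item" are hypothetical names): a
 * writer unlinks the old copy, then either defers its reclamation with
 * call_rcu() or blocks in synchronize_kernel() until a grace period has
 * elapsed before freeing it directly.
 *
 *	static void free_item(void *arg)
 *	{
 *		kfree(arg);
 *	}
 *
 *	old = global_ptr;
 *	global_ptr = new;
 *	call_rcu(&old->rcu, free_item, old);
 *
 * or, synchronously:
 *
 *	old = global_ptr;
 *	global_ptr = new;
 *	synchronize_kernel();
 *	kfree(old);
 */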
#endif /* __KERNEL__ */
#endif /* __LINUX_RCUPDATE_H */