#ifndef __i386_UACCESS_H
#define __i386_UACCESS_H

// don't compile anything!!!!

#if 0

/*
 * User space memory access functions
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })


#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->addr_limit)
#define set_fs(x)	(current->addr_limit = (x))
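/*
 * Illustrative only, not part of the original header: the usual pattern
 * for letting a kernel-space buffer pass the user-access checks is to
 * widen the address limit around the access and restore it afterwards:
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	... accesses that access_ok() would otherwise reject ...
 *	set_fs(old_fs);
 */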
35 | |||
36 | #define segment_eq(a,b) ((a).seg == (b).seg) |
||
37 | |||
38 | extern int __verify_write(const void *, unsigned long); |
||
39 | |||
40 | #define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg)) |
||
41 | |||
42 | /* |
||
43 | * Uhhuh, this needs 33-bit arithmetic. We have a carry.. |
||
44 | */ |
||
45 | #define __range_ok(addr,size) ({ \ |
||
46 | unsigned long flag,sum; \ |
||
47 | asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \ |
||
48 | :"=&r" (flag), "=r" (sum) \ |
||
49 | :"1" (addr),"g" (size),"g" (current->addr_limit.seg)); \ |
||
50 | flag; }) |
||
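/*
 * Not in the original file: the asm above is roughly equivalent to the
 * portable C below, except that plain 32-bit C cannot see the carry out
 * of the addition, which is why the macro needs "33-bit" arithmetic.
 * flag ends up 0 when [addr, addr+size] lies within addr_limit, and
 * nonzero otherwise.
 *
 *	unsigned long sum = (unsigned long)(addr) + (size);
 *	int carry  = sum < (unsigned long)(addr);	// wrapped past 2^32
 *	int beyond = sum > current->addr_limit.seg;	// past the limit
 *	flag = (carry || beyond) ? ~0UL : 0;
 */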
51 | |||
52 | #ifdef CONFIG_X86_WP_WORKS_OK |
||
53 | |||
54 | #define access_ok(type,addr,size) (__range_ok(addr,size) == 0) |
||
55 | |||
56 | #else |
||
57 | |||
58 | #define access_ok(type,addr,size) ( (__range_ok(addr,size) == 0) && \ |
||
59 | ((type) == VERIFY_READ || boot_cpu_data.wp_works_ok || \ |
||
60 | segment_eq(get_fs(),KERNEL_DS) || \ |
||
61 | __verify_write((void *)(addr),(size)))) |
||
62 | |||
63 | #endif /* CPU */ |
||
64 | |||
65 | extern inline int verify_area(int type, const void * addr, unsigned long size) |
||
66 | { |
||
67 | return access_ok(type,addr,size) ? 0 : -EFAULT; |
||
68 | } |
||
69 | |||
70 | |||
71 | /* |
||
72 | * The exception table consists of pairs of addresses: the first is the |
||
73 | * address of an instruction that is allowed to fault, and the second is |
||
74 | * the address at which the program should continue. No registers are |
||
75 | * modified, so it is entirely up to the continuation code to figure out |
||
76 | * what to do. |
||
77 | * |
||
78 | * All the routines below use bits of fixup code that are out of line |
||
79 | * with the main instruction path. This means when everything is well, |
||
80 | * we don't even have to jump over them. Further, they do not intrude |
||
81 | * on our cache or tlb entries. |
||
82 | */ |
||
83 | |||
84 | struct exception_table_entry |
||
85 | { |
||
86 | unsigned long insn, fixup; |
||
87 | }; |
||
88 | |||
89 | /* Returns 0 if exception not found and fixup otherwise. */ |
||
90 | extern unsigned long search_exception_table(unsigned long); |
||
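/*
 * Sketch, not from this file: this is roughly how the page-fault handler
 * consumes the table. If the faulting EIP has an entry, execution resumes
 * at the fixup address instead of killing the task.
 *
 *	unsigned long fixup = search_exception_table(regs->eip);
 *	if (fixup) {
 *		regs->eip = fixup;	// resume at the out-of-line fixup
 *		return;
 *	}
 */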
91 | |||
92 | |||
93 | /* |
||
94 | * These are the main single-value transfer routines. They automatically |
||
95 | * use the right size if we just have the right pointer type. |
||
96 | * |
||
97 | * This gets kind of ugly. We want to return _two_ values in "get_user()" |
||
98 | * and yet we don't want to do any pointers, because that is too much |
||
99 | * of a performance impact. Thus we have a few rather ugly macros here, |
||
100 | * and hide all the uglyness from the user. |
||
101 | * |
||
102 | * The "__xxx" versions of the user access functions are versions that |
||
103 | * do not verify the address space, that must have been done previously |
||
104 | * with a separate "access_ok()" call (this is used when we do multiple |
||
105 | * accesses to the same area of user memory). |
||
106 | */ |
||
107 | |||
108 | extern void __get_user_1(void); |
||
109 | extern void __get_user_2(void); |
||
110 | extern void __get_user_4(void); |
||
111 | |||
112 | #define __get_user_x(size,ret,x,ptr) \ |
||
113 | __asm__ __volatile__("call __get_user_" #size \ |
||
114 | :"=a" (ret),"=d" (x) \ |
||
115 | :"0" (ptr)) |
||
116 | |||
117 | /* Careful: we have to cast the result to the type of the pointer for sign reasons */ |
||
118 | #define get_user(x,ptr) \ |
||
119 | ({ int __ret_gu,__val_gu; \ |
||
120 | switch(sizeof (*(ptr))) { \ |
||
121 | case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \ |
||
122 | case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \ |
||
123 | case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \ |
||
124 | default: __get_user_x(X,__ret_gu,__val_gu,ptr); break; \ |
||
125 | } \ |
||
126 | (x) = (__typeof__(*(ptr)))__val_gu; \ |
||
127 | __ret_gu; \ |
||
128 | }) |
||
129 | |||
130 | extern void __put_user_1(void); |
||
131 | extern void __put_user_2(void); |
||
132 | extern void __put_user_4(void); |
||
133 | |||
134 | extern void __put_user_bad(void); |
||
135 | |||
136 | #define __put_user_x(size,ret,x,ptr) \ |
||
137 | __asm__ __volatile__("call __put_user_" #size \ |
||
138 | :"=a" (ret) \ |
||
139 | :"0" (ptr),"d" (x) \ |
||
140 | :"cx") |
||
141 | |||
142 | #define put_user(x,ptr) \ |
||
143 | ({ int __ret_pu; \ |
||
144 | switch(sizeof (*(ptr))) { \ |
||
145 | case 1: __put_user_x(1,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \ |
||
146 | case 2: __put_user_x(2,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \ |
||
147 | case 4: __put_user_x(4,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \ |
||
148 | default: __put_user_x(X,__ret_pu,x,ptr); break; \ |
||
149 | } \ |
||
150 | __ret_pu; \ |
||
151 | }) |
||
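/*
 * Illustrative usage, not part of the original header: the access size is
 * picked from the pointer type, and both macros return 0 on success or
 * -EFAULT on a faulting user address.
 *
 *	int example(int *uptr)
 *	{
 *		int val;
 *		if (get_user(val, uptr))	// read *uptr into val
 *			return -EFAULT;
 *		return put_user(val + 1, uptr);	// write it back, incremented
 *	}
 */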
152 | |||
153 | #define __get_user(x,ptr) \ |
||
154 | __get_user_nocheck((x),(ptr),sizeof(*(ptr))) |
||
155 | #define __put_user(x,ptr) \ |
||
156 | __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) |
||
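/*
 * Sketch, not from this file: the unchecked variants amortize a single
 * access_ok() over several accesses to the same user area.
 *
 *	int read_pair(int *uptr, int *a, int *b)
 *	{
 *		if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
 *			return -EFAULT;
 *		if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
 *			return -EFAULT;
 *		return 0;
 *	}
 */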
157 | |||
158 | #define __put_user_nocheck(x,ptr,size) \ |
||
159 | ({ \ |
||
160 | long __pu_err; \ |
||
161 | __put_user_size((x),(ptr),(size),__pu_err); \ |
||
162 | __pu_err; \ |
||
163 | }) |
||
164 | |||
165 | #define __put_user_size(x,ptr,size,retval) \ |
||
166 | do { \ |
||
167 | retval = 0; \ |
||
168 | switch (size) { \ |
||
169 | case 1: __put_user_asm(x,ptr,retval,"b","b","iq"); break; \ |
||
170 | case 2: __put_user_asm(x,ptr,retval,"w","w","ir"); break; \ |
||
171 | case 4: __put_user_asm(x,ptr,retval,"l","","ir"); break; \ |
||
172 | default: __put_user_bad(); \ |
||
173 | } \ |
||
174 | } while (0) |
||
175 | |||
176 | struct __large_struct { unsigned long buf[100]; }; |
||
177 | #define __m(x) (*(struct __large_struct *)(x)) |
||
178 | |||
179 | /* |
||
180 | * Tell gcc we read from memory instead of writing: this is because |
||
181 | * we do not write to any memory gcc knows about, so there are no |
||
182 | * aliasing issues. |
||
183 | */ |
||
184 | #define __put_user_asm(x, addr, err, itype, rtype, ltype) \ |
||
185 | __asm__ __volatile__( \ |
||
186 | "1: mov"itype" %"rtype"1,%2\n" \ |
||
187 | "2:\n" \ |
||
188 | ".section .fixup,\"ax\"\n" \ |
||
189 | "3: movl %3,%0\n" \ |
||
190 | " jmp 2b\n" \ |
||
191 | ".previous\n" \ |
||
192 | ".section __ex_table,\"a\"\n" \ |
||
193 | " .align 4\n" \ |
||
194 | " .long 1b,3b\n" \ |
||
195 | ".previous" \ |
||
196 | : "=r"(err) \ |
||
197 | : ltype (x), "m"(__m(addr)), "i"(-EFAULT), "0"(err)) |
||
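/*
 * Not in the original file, registers illustrative: for a 4-byte store,
 * __put_user_asm expands to roughly the following. The mov at label 1
 * may fault; its __ex_table entry points at label 3, which loads -EFAULT
 * into err and resumes at label 2, keeping the fixup out of the hot path.
 *
 *	1:	movl %eax,(%ecx)	# the store that may fault
 *	2:				# normal continuation
 *	.section .fixup,"ax"
 *	3:	movl $-14,%edx		# err = -EFAULT
 *		jmp 2b
 *	.previous
 *	.section __ex_table,"a"
 *		.align 4
 *		.long 1b,3b		# (faulting insn, fixup) pair
 *	.previous
 */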
198 | |||
199 | |||
200 | #define __get_user_nocheck(x,ptr,size) \ |
||
201 | ({ \ |
||
202 | long __gu_err, __gu_val; \ |
||
203 | __get_user_size(__gu_val,(ptr),(size),__gu_err); \ |
||
204 | (x) = (__typeof__(*(ptr)))__gu_val; \ |
||
205 | __gu_err; \ |
||
206 | }) |
||
207 | |||
208 | extern long __get_user_bad(void); |
||
209 | |||
210 | #define __get_user_size(x,ptr,size,retval) \ |
||
211 | do { \ |
||
212 | retval = 0; \ |
||
213 | switch (size) { \ |
||
214 | case 1: __get_user_asm(x,ptr,retval,"b","b","=q"); break; \ |
||
215 | case 2: __get_user_asm(x,ptr,retval,"w","w","=r"); break; \ |
||
216 | case 4: __get_user_asm(x,ptr,retval,"l","","=r"); break; \ |
||
217 | default: (x) = __get_user_bad(); \ |
||
218 | } \ |
||
219 | } while (0) |
||
220 | |||
221 | #define __get_user_asm(x, addr, err, itype, rtype, ltype) \ |
||
222 | __asm__ __volatile__( \ |
||
223 | "1: mov"itype" %2,%"rtype"1\n" \ |
||
224 | "2:\n" \ |
||
225 | ".section .fixup,\"ax\"\n" \ |
||
226 | "3: movl %3,%0\n" \ |
||
227 | " xor"itype" %"rtype"1,%"rtype"1\n" \ |
||
228 | " jmp 2b\n" \ |
||
229 | ".previous\n" \ |
||
230 | ".section __ex_table,\"a\"\n" \ |
||
231 | " .align 4\n" \ |
||
232 | " .long 1b,3b\n" \ |
||
233 | ".previous" \ |
||
234 | : "=r"(err), ltype (x) \ |
||
235 | : "m"(__m(addr)), "i"(-EFAULT), "0"(err)) |
||
236 | |||
237 | /* |
||
238 | * The "xxx_ret" versions return constant specified in third argument, if |
||
239 | * something bad happens. These macros can be optimized for the |
||
240 | * case of just returning from the function xxx_ret is used. |
||
241 | */ |
||
242 | |||
243 | #define put_user_ret(x,ptr,ret) ({ if (put_user(x,ptr)) return ret; }) |
||
244 | |||
245 | #define get_user_ret(x,ptr,ret) ({ if (get_user(x,ptr)) return ret; }) |
||
246 | |||
247 | #define __put_user_ret(x,ptr,ret) ({ if (__put_user(x,ptr)) return ret; }) |
||
248 | |||
249 | #define __get_user_ret(x,ptr,ret) ({ if (__get_user(x,ptr)) return ret; }) |
||
250 | |||
251 | |||
/*
 * Copy To/From Userspace
 */

/* Generic arbitrary sized copy. */
#define __copy_user(to,from,size) \
do { \
	int __d0, __d1; \
	__asm__ __volatile__( \
		"0:	rep; movsl\n" \
		"	movl %3,%0\n" \
		"1:	rep; movsb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	lea 0(%3,%0,4),%0\n" \
		"	jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 4\n" \
		"	.long 0b,3b\n" \
		"	.long 1b,2b\n" \
		".previous" \
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
		: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
		: "memory"); \
} while (0)
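/*
 * Not in the original file: on a fault in the dword loop at label 0,
 * %0 (ecx) holds the dwords not yet copied and %3 holds size & 3, so the
 * fixup "lea 0(%3,%0,4),%0" recomputes the bytes NOT copied
 * (4 * remaining_dwords + trailing_bytes), which is what the copy_*_user
 * routines return. A fault in the byte loop at label 1 resumes directly
 * at label 2, where %0 already counts the leftover bytes.
 */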
278 | |||
279 | #define __copy_user_zeroing(to,from,size) \ |
||
280 | do { \ |
||
281 | int __d0, __d1; \ |
||
282 | __asm__ __volatile__( \ |
||
283 | "0: rep; movsl\n" \ |
||
284 | " movl %3,%0\n" \ |
||
285 | "1: rep; movsb\n" \ |
||
286 | "2:\n" \ |
||
287 | ".section .fixup,\"ax\"\n" \ |
||
288 | "3: lea 0(%3,%0,4),%0\n" \ |
||
289 | "4: pushl %0\n" \ |
||
290 | " pushl %%eax\n" \ |
||
291 | " xorl %%eax,%%eax\n" \ |
||
292 | " rep; stosb\n" \ |
||
293 | " popl %%eax\n" \ |
||
294 | " popl %0\n" \ |
||
295 | " jmp 2b\n" \ |
||
296 | ".previous\n" \ |
||
297 | ".section __ex_table,\"a\"\n" \ |
||
298 | " .align 4\n" \ |
||
299 | " .long 0b,3b\n" \ |
||
300 | " .long 1b,4b\n" \ |
||
301 | ".previous" \ |
||
302 | : "=&c"(size), "=&D" (__d0), "=&S" (__d1) \ |
||
303 | : "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \ |
||
304 | : "memory"); \ |
||
305 | } while (0) |
||
306 | |||
307 | /* We let the __ versions of copy_from/to_user inline, because they're often |
||
308 | * used in fast paths and have only a small space overhead. |
||
309 | */ |
||
310 | static inline unsigned long |
||
311 | __generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n) |
||
312 | { |
||
313 | __copy_user_zeroing(to,from,n); |
||
314 | return n; |
||
315 | } |
||
316 | |||
317 | static inline unsigned long |
||
318 | __generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n) |
||
319 | { |
||
320 | __copy_user(to,from,n); |
||
321 | return n; |
||
322 | } |
||
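/*
 * Aside, not from this file: the _zeroing variant pads the uncopied tail
 * of the kernel destination with zero bytes after a fault, so a short
 * copy_from_user never leaves uninitialized kernel memory behind. All of
 * these helpers return the number of bytes that could NOT be copied, so
 * 0 means full success:
 *
 *	if (copy_from_user(kbuf, ubuf, len))	// nonzero => partial copy
 *		return -EFAULT;
 */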
323 | |||
324 | |||
325 | /* Optimize just a little bit when we know the size of the move. */ |
||
326 | #define __constant_copy_user(to, from, size) \ |
||
327 | do { \ |
||
328 | int __d0, __d1; \ |
||
329 | switch (size & 3) { \ |
||
330 | default: \ |
||
331 | __asm__ __volatile__( \ |
||
332 | "0: rep; movsl\n" \ |
||
333 | "1:\n" \ |
||
334 | ".section .fixup,\"ax\"\n" \ |
||
335 | "2: shl $2,%0\n" \ |
||
336 | " jmp 1b\n" \ |
||
337 | ".previous\n" \ |
||
338 | ".section __ex_table,\"a\"\n" \ |
||
339 | " .align 4\n" \ |
||
340 | " .long 0b,2b\n" \ |
||
341 | ".previous" \ |
||
342 | : "=c"(size), "=&S" (__d0), "=&D" (__d1)\ |
||
343 | : "1"(from), "2"(to), "0"(size/4) \ |
||
344 | : "memory"); \ |
||
345 | break; \ |
||
346 | case 1: \ |
||
347 | __asm__ __volatile__( \ |
||
348 | "0: rep; movsl\n" \ |
||
349 | "1: movsb\n" \ |
||
350 | "2:\n" \ |
||
351 | ".section .fixup,\"ax\"\n" \ |
||
352 | "3: shl $2,%0\n" \ |
||
353 | "4: incl %0\n" \ |
||
354 | " jmp 2b\n" \ |
||
355 | ".previous\n" \ |
||
356 | ".section __ex_table,\"a\"\n" \ |
||
357 | " .align 4\n" \ |
||
358 | " .long 0b,3b\n" \ |
||
359 | " .long 1b,4b\n" \ |
||
360 | ".previous" \ |
||
361 | : "=c"(size), "=&S" (__d0), "=&D" (__d1)\ |
||
362 | : "1"(from), "2"(to), "0"(size/4) \ |
||
363 | : "memory"); \ |
||
364 | break; \ |
||
365 | case 2: \ |
||
366 | __asm__ __volatile__( \ |
||
367 | "0: rep; movsl\n" \ |
||
368 | "1: movsw\n" \ |
||
369 | "2:\n" \ |
||
370 | ".section .fixup,\"ax\"\n" \ |
||
371 | "3: shl $2,%0\n" \ |
||
372 | "4: addl $2,%0\n" \ |
||
373 | " jmp 2b\n" \ |
||
374 | ".previous\n" \ |
||
375 | ".section __ex_table,\"a\"\n" \ |
||
376 | " .align 4\n" \ |
||
377 | " .long 0b,3b\n" \ |
||
378 | " .long 1b,4b\n" \ |
||
379 | ".previous" \ |
||
380 | : "=c"(size), "=&S" (__d0), "=&D" (__d1)\ |
||
381 | : "1"(from), "2"(to), "0"(size/4) \ |
||
382 | : "memory"); \ |
||
383 | break; \ |
||
384 | case 3: \ |
||
385 | __asm__ __volatile__( \ |
||
386 | "0: rep; movsl\n" \ |
||
387 | "1: movsw\n" \ |
||
388 | "2: movsb\n" \ |
||
389 | "3:\n" \ |
||
390 | ".section .fixup,\"ax\"\n" \ |
||
391 | "4: shl $2,%0\n" \ |
||
392 | "5: addl $2,%0\n" \ |
||
393 | "6: incl %0\n" \ |
||
394 | " jmp 3b\n" \ |
||
395 | ".previous\n" \ |
||
396 | ".section __ex_table,\"a\"\n" \ |
||
397 | " .align 4\n" \ |
||
398 | " .long 0b,4b\n" \ |
||
399 | " .long 1b,5b\n" \ |
||
400 | " .long 2b,6b\n" \ |
||
401 | ".previous" \ |
||
402 | : "=c"(size), "=&S" (__d0), "=&D" (__d1)\ |
||
403 | : "1"(from), "2"(to), "0"(size/4) \ |
||
404 | : "memory"); \ |
||
405 | break; \ |
||
406 | } \ |
||
407 | } while (0) |
||
408 | |||
409 | /* Optimize just a little bit when we know the size of the move. */ |
||
410 | #define __constant_copy_user_zeroing(to, from, size) \ |
||
411 | do { \ |
||
412 | int __d0, __d1; \ |
||
413 | switch (size & 3) { \ |
||
414 | default: \ |
||
415 | __asm__ __volatile__( \ |
||
416 | "0: rep; movsl\n" \ |
||
417 | "1:\n" \ |
||
418 | ".section .fixup,\"ax\"\n" \ |
||
419 | "2: pushl %0\n" \ |
||
420 | " pushl %%eax\n" \ |
||
421 | " xorl %%eax,%%eax\n" \ |
||
422 | " rep; stosl\n" \ |
||
423 | " popl %%eax\n" \ |
||
424 | " popl %0\n" \ |
||
425 | " shl $2,%0\n" \ |
||
426 | " jmp 1b\n" \ |
||
427 | ".previous\n" \ |
||
428 | ".section __ex_table,\"a\"\n" \ |
||
429 | " .align 4\n" \ |
||
430 | " .long 0b,2b\n" \ |
||
431 | ".previous" \ |
||
432 | : "=c"(size), "=&S" (__d0), "=&D" (__d1)\ |
||
433 | : "1"(from), "2"(to), "0"(size/4) \ |
||
434 | : "memory"); \ |
||
435 | break; \ |
||
436 | case 1: \ |
||
437 | __asm__ __volatile__( \ |
||
438 | "0: rep; movsl\n" \ |
||
439 | "1: movsb\n" \ |
||
440 | "2:\n" \ |
||
441 | ".section .fixup,\"ax\"\n" \ |
||
442 | "3: pushl %0\n" \ |
||
443 | " pushl %%eax\n" \ |
||
444 | " xorl %%eax,%%eax\n" \ |
||
445 | " rep; stosl\n" \ |
||
446 | " stosb\n" \ |
||
447 | " popl %%eax\n" \ |
||
448 | " popl %0\n" \ |
||
449 | " shl $2,%0\n" \ |
||
450 | " incl %0\n" \ |
||
451 | " jmp 2b\n" \ |
||
452 | "4: pushl %%eax\n" \ |
||
453 | " xorl %%eax,%%eax\n" \ |
||
454 | " stosb\n" \ |
||
455 | " popl %%eax\n" \ |
||
456 | " incl %0\n" \ |
||
457 | " jmp 2b\n" \ |
||
458 | ".previous\n" \ |
||
459 | ".section __ex_table,\"a\"\n" \ |
||
460 | " .align 4\n" \ |
||
461 | " .long 0b,3b\n" \ |
||
462 | " .long 1b,4b\n" \ |
||
463 | ".previous" \ |
||
464 | : "=c"(size), "=&S" (__d0), "=&D" (__d1)\ |
||
465 | : "1"(from), "2"(to), "0"(size/4) \ |
||
466 | : "memory"); \ |
||
467 | break; \ |
||
468 | case 2: \ |
||
469 | __asm__ __volatile__( \ |
||
470 | "0: rep; movsl\n" \ |
||
471 | "1: movsw\n" \ |
||
472 | "2:\n" \ |
||
473 | ".section .fixup,\"ax\"\n" \ |
||
474 | "3: pushl %0\n" \ |
||
475 | " pushl %%eax\n" \ |
||
476 | " xorl %%eax,%%eax\n" \ |
||
477 | " rep; stosl\n" \ |
||
478 | " stosw\n" \ |
||
479 | " popl %%eax\n" \ |
||
480 | " popl %0\n" \ |
||
481 | " shl $2,%0\n" \ |
||
482 | " addl $2,%0\n" \ |
||
483 | " jmp 2b\n" \ |
||
484 | "4: pushl %%eax\n" \ |
||
485 | " xorl %%eax,%%eax\n" \ |
||
486 | " stosw\n" \ |
||
487 | " popl %%eax\n" \ |
||
488 | " addl $2,%0\n" \ |
||
489 | " jmp 2b\n" \ |
||
490 | ".previous\n" \ |
||
491 | ".section __ex_table,\"a\"\n" \ |
||
492 | " .align 4\n" \ |
||
493 | " .long 0b,3b\n" \ |
||
494 | " .long 1b,4b\n" \ |
||
495 | ".previous" \ |
||
496 | : "=c"(size), "=&S" (__d0), "=&D" (__d1)\ |
||
497 | : "1"(from), "2"(to), "0"(size/4) \ |
||
498 | : "memory"); \ |
||
499 | break; \ |
||
500 | case 3: \ |
||
501 | __asm__ __volatile__( \ |
||
502 | "0: rep; movsl\n" \ |
||
503 | "1: movsw\n" \ |
||
504 | "2: movsb\n" \ |
||
505 | "3:\n" \ |
||
506 | ".section .fixup,\"ax\"\n" \ |
||
507 | "4: pushl %0\n" \ |
||
508 | " pushl %%eax\n" \ |
||
509 | " xorl %%eax,%%eax\n" \ |
||
510 | " rep; stosl\n" \ |
||
511 | " stosw\n" \ |
||
512 | " stosb\n" \ |
||
513 | " popl %%eax\n" \ |
||
514 | " popl %0\n" \ |
||
515 | " shl $2,%0\n" \ |
||
516 | " addl $3,%0\n" \ |
||
517 | " jmp 2b\n" \ |
||
518 | "5: pushl %%eax\n" \ |
||
519 | " xorl %%eax,%%eax\n" \ |
||
520 | " stosw\n" \ |
||
521 | " stosb\n" \ |
||
522 | " popl %%eax\n" \ |
||
523 | " addl $3,%0\n" \ |
||
524 | " jmp 2b\n" \ |
||
525 | "6: pushl %%eax\n" \ |
||
526 | " xorl %%eax,%%eax\n" \ |
||
527 | " stosb\n" \ |
||
528 | " popl %%eax\n" \ |
||
529 | " incl %0\n" \ |
||
530 | " jmp 2b\n" \ |
||
531 | ".previous\n" \ |
||
532 | ".section __ex_table,\"a\"\n" \ |
||
533 | " .align 4\n" \ |
||
534 | " .long 0b,4b\n" \ |
||
535 | " .long 1b,5b\n" \ |
||
536 | " .long 2b,6b\n" \ |
||
537 | ".previous" \ |
||
538 | : "=c"(size), "=&S" (__d0), "=&D" (__d1)\ |
||
539 | : "1"(from), "2"(to), "0"(size/4) \ |
||
540 | : "memory"); \ |
||
541 | break; \ |
||
542 | } \ |
||
543 | } while (0) |
||
544 | |||
545 | unsigned long __generic_copy_to_user(void *, const void *, unsigned long); |
||
546 | unsigned long __generic_copy_from_user(void *, const void *, unsigned long); |
||
547 | |||
548 | static inline unsigned long |
||
549 | __constant_copy_to_user(void *to, const void *from, unsigned long n) |
||
550 | { |
||
551 | if (access_ok(VERIFY_WRITE, to, n)) |
||
552 | __constant_copy_user(to,from,n); |
||
553 | return n; |
||
554 | } |
||
555 | |||
556 | static inline unsigned long |
||
557 | __constant_copy_from_user(void *to, const void *from, unsigned long n) |
||
558 | { |
||
559 | if (access_ok(VERIFY_READ, from, n)) |
||
560 | __constant_copy_user_zeroing(to,from,n); |
||
561 | return n; |
||
562 | } |
||
563 | |||
564 | static inline unsigned long |
||
565 | __constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n) |
||
566 | { |
||
567 | __constant_copy_user(to,from,n); |
||
568 | return n; |
||
569 | } |
||
570 | |||
571 | static inline unsigned long |
||
572 | __constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n) |
||
573 | { |
||
574 | __constant_copy_user_zeroing(to,from,n); |
||
575 | return n; |
||
576 | } |
||
577 | |||
578 | #define copy_to_user(to,from,n) \ |
||
579 | (__builtin_constant_p(n) ? \ |
||
580 | __constant_copy_to_user((to),(from),(n)) : \ |
||
581 | __generic_copy_to_user((to),(from),(n))) |
||
582 | |||
583 | #define copy_from_user(to,from,n) \ |
||
584 | (__builtin_constant_p(n) ? \ |
||
585 | __constant_copy_from_user((to),(from),(n)) : \ |
||
586 | __generic_copy_from_user((to),(from),(n))) |
||
587 | |||
588 | #define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; }) |
||
589 | |||
590 | #define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; }) |
||
591 | |||
592 | #define __copy_to_user(to,from,n) \ |
||
593 | (__builtin_constant_p(n) ? \ |
||
594 | __constant_copy_to_user_nocheck((to),(from),(n)) : \ |
||
595 | __generic_copy_to_user_nocheck((to),(from),(n))) |
||
596 | |||
597 | #define __copy_from_user(to,from,n) \ |
||
598 | (__builtin_constant_p(n) ? \ |
||
599 | __constant_copy_from_user_nocheck((to),(from),(n)) : \ |
||
600 | __generic_copy_from_user_nocheck((to),(from),(n))) |
||
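/*
 * Not part of the original header: __builtin_constant_p lets gcc pick
 * the unrolled __constant_* path when the length is known at compile
 * time, e.g. copy_to_user(uptr, &st, sizeof(st)) with a fixed-size
 * struct, while a runtime length falls back to the rep-movs __generic_*
 * path.
 */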
601 | |||
602 | long strncpy_from_user(char *dst, const char *src, long count); |
||
603 | long __strncpy_from_user(char *dst, const char *src, long count); |
||
604 | #define strlen_user(str) strnlen_user(str, ~0UL >> 1) |
||
605 | long strnlen_user(const char *str, long n); |
||
606 | unsigned long clear_user(void *mem, unsigned long len); |
||
607 | unsigned long __clear_user(void *mem, unsigned long len); |
||
608 | |||
609 | #endif |
||
610 | #endif /* __i386_UACCESS_H */ |