/* Project:     OSLib
 * Description: The OS Construction Kit
 * Date:                1.6.2000
 * Idea by:             Luca Abeni & Gerardo Lamastra
 *
 * OSLib is an SO project aimed at developing a common, easy-to-use
 * low-level infrastructure for developing OS kernels and Embedded
 * Applications; it partially derives from the HARTIK project but is
 * currently developed independently.
 *
 * OSLib is distributed under the GPL License, and some of its code has
 * been derived from the Linux kernel source; some important ideas also
 * come from studying the DJGPP go32 extender.
 *
 * We acknowledge the Linux Community, the Free Software Foundation,
 * D.J. Delorie and all the other developers who believe in the
 * freedom of software and ideas.
 *
 * For legalese, check out the included GPL license.
 */

/* As the name says... all the hardware-dependent instructions are here;
   there is a 1->1 correspondence with ASM instructions */

#ifndef __LL_I386_HW_INSTR_H__
#define __LL_I386_HW_INSTR_H__

#include <ll/i386/defs.h>
BEGIN_DEF

#define INLINE_OP __inline__ static

#include <ll/i386/hw-data.h>

/* Low-level I/O funcs are in a separate file (by Luca) */
#include <ll/i386/hw-io.h>

INLINE_OP WORD get_CS(void)
{WORD r; __asm__ __volatile__ ("movw %%cs,%0" : "=q" (r)); return(r);}

INLINE_OP WORD get_DS(void)
{WORD r; __asm__ __volatile__ ("movw %%ds,%0" : "=q" (r)); return(r);}

INLINE_OP WORD get_FS(void)
{WORD r; __asm__ __volatile__ ("movw %%fs,%0" : "=q" (r)); return(r);}

/*INLINE_OP DWORD get_SP(void)
{DWORD r; __asm__ __volatile__ ("movw %%esp,%0" : "=q" (r)); return(r);}*/
INLINE_OP DWORD get_SP(void)
{
    DWORD rv;
    __asm__ __volatile__ ("movl %%esp, %0"
          : "=a" (rv));
    return(rv);
}

INLINE_OP DWORD get_BP(void)
{
    DWORD rv;
    __asm__ __volatile__ ("movl %%ebp, %0"
          : "=a" (rv));
    return(rv);
}
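
/* Usage sketch: the selector/stack readers above are mostly useful for
 * debugging and sanity checks.  A minimal, hypothetical example; it assumes
 * a console printf-like routine (here called cprintf()) is available:
 */
#if 0
void dump_cpu_state(void)
{
    WORD  cs  = get_CS();
    WORD  ds  = get_DS();
    DWORD esp = get_SP();
    DWORD ebp = get_BP();

    cprintf("CS=%x DS=%x ESP=%lx EBP=%lx\n", cs, ds, esp, ebp);
}
#endif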
63
 
64
INLINE_OP WORD get_TR(void)
65
{WORD r; __asm__ __volatile__ ("strw %0" : "=q" (r)); return(r); }
66
 
67
INLINE_OP void set_TR(WORD n)
68
{__asm__ __volatile__("ltr %%ax": /* no output */ :"a" (n)); }
69
 
70
INLINE_OP void set_LDTR(WORD addr)
71
{ __asm__ __volatile__("lldt %%ax": /* no output */ :"a" (addr)); }
72
 
73
 
74
/* Clear Task Switched Flag! Used for FPU preemtion */
75
INLINE_OP void clts(void)
76
{__asm__ __volatile__ ("clts"); }
77
 
78
/* Halt the processor! */
79
INLINE_OP void hlt(void)
80
{__asm__ __volatile__ ("hlt"); }
81
 
82
/* These functions are used to mask/unmask interrupts           */
83
INLINE_OP void sti(void) {__asm__ __volatile__ ("sti"); }
84
INLINE_OP void cli(void) {__asm__ __volatile__ ("cli"); }
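
/* Usage sketch: a typical idle loop built on the primitives above; sti()
 * re-enables interrupts and hlt() stops the CPU until the next interrupt,
 * so the loop does not spin while waiting.  work_available() is a
 * hypothetical predicate, not part of OSLib:
 */
#if 0
static void idle_wait(void)
{
    while (!work_available()) {
        sti();      /* let interrupts in, so something can wake us */
        hlt();      /* sleep until the next interrupt              */
    }
}
#endif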

/* Save EFLAGS and disable interrupts; the previous flags are returned */
INLINE_OP SYS_FLAGS ll_fsave(void)
{
    SYS_FLAGS result;

    __asm__ __volatile__ ("pushfl");
    __asm__ __volatile__ ("cli");
    __asm__ __volatile__ ("popl %eax");
    __asm__ __volatile__ ("movl %%eax,%0"
        : "=r" (result)
        :
        : "eax" );
    return(result);
}

/* Restore the EFLAGS value saved by ll_fsave() */
INLINE_OP void ll_frestore(SYS_FLAGS f)
{
    __asm__ __volatile__ ("mov %0,%%eax"
        :
        : "r" (f)
        : "eax");
    __asm__ __volatile__ ("pushl %eax");
    __asm__ __volatile__ ("popfl");
}
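
/* Usage sketch: ll_fsave()/ll_frestore() bracket a critical section; the
 * previous EFLAGS value is restored instead of blindly re-enabling
 * interrupts, so the pattern also works when called with interrupts
 * already disabled.  The shared counter below is hypothetical:
 */
#if 0
static int shared_counter;

static void increment_shared_counter(void)
{
    SYS_FLAGS f;

    f = ll_fsave();        /* save EFLAGS and disable interrupts   */
    shared_counter++;      /* protected update                     */
    ll_frestore(f);        /* restore the previous interrupt state */
}
#endif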

/*
    FPU context switch management functions!
    FPU management is exported at the kernel layer to allow the use
    of floating point in kernel primitives; this turns out to be
    useful for bandwidth reservation or guarantee!
*/

/* FPU lazy state save handling.. */
INLINE_OP void save_fpu(TSS *t)
{
    __asm__ __volatile__("fnsave %0\n\tfwait":"=m" (t->ctx_FPU));
}

INLINE_OP void restore_fpu(TSS *t)
{
#if 1
    __asm__ __volatile__("frstor %0": :"m" (t->ctx_FPU));
#else
    __asm__ __volatile__("frstor %0\n\tfwait": :"m" (t->ctx_FPU));
#endif
/*    __asm__ __volatile__("frstor _LL_FPU_savearea"); */
}

INLINE_OP void smartsave_fpu(TSS *t)
{
    if (t->control & FPU_USED) save_fpu(t);
}

INLINE_OP void reset_fpu(void) { __asm__ __volatile__ ("fninit"); }
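
/* Usage sketch: how the FPU helpers above can combine in a context switch.
 * smartsave_fpu() stores the FPU state only for tasks that actually used
 * it (FPU_USED set in the TSS control word); the function below is a
 * hypothetical illustration, not an OSLib primitive:
 */
#if 0
static void switch_fpu_context(TSS *outgoing, TSS *incoming)
{
    smartsave_fpu(outgoing);          /* save only if it touched the FPU  */
    if (incoming->control & FPU_USED)
        restore_fpu(incoming);        /* reload its saved FPU context     */
    else
        reset_fpu();                  /* otherwise start from a clean FPU */
}
#endif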

#if 0
/* OK, now everything is clear... We test the NE bit to see if the
 * CPU is using the internal mechanism for reporting FPU errors or not...
 */
INLINE_OP int check_fpu(void)
{
    int result;

    __asm__ __volatile__ ("movl %cr0,%eax");
    __asm__ __volatile__ ("movl %eax,%edi");
    __asm__ __volatile__ ("andl $0x0FFFFFFEF,%eax");
    __asm__ __volatile__ ("movl %eax,%cr0");
    __asm__ __volatile__ ("movl %cr0,%eax");
    __asm__ __volatile__ ("xchgl %edi,%eax");
    __asm__ __volatile__ ("movl %eax,%cr0");
#if 0
    __asm__ __volatile__ ("xorl %eax,%eax");
    __asm__ __volatile__ ("movb %bl,%al");
#else
    __asm__ __volatile__ ("movl %edi,%eax");
    __asm__ __volatile__ ("andl $0x10,%eax");
#endif
    __asm__ __volatile__ ("shrb $4,%al");
    __asm__ __volatile__ ("movl %%eax,%0"
        : "=r" (result)
        :
        : "eax" );
    return(result);
}
#endif

INLINE_OP void init_fpu(void)
{
    __asm__ __volatile__ ("movl %cr0,%eax");
    __asm__ __volatile__ ("orl  $34,%eax");    /* 34 = 0x22: set the MP and NE bits in CR0 */
    __asm__ __volatile__ ("movl %eax,%cr0");
    __asm__ __volatile__ ("fninit");
}

extern BYTE LL_FPU_savearea[];

extern __inline__ void LL_FPU_save(void)
{
    #ifdef __LINUX__
        __asm__ __volatile__ ("fsave LL_FPU_savearea");
    #else
        __asm__ __volatile__ ("fsave _LL_FPU_savearea");
    #endif
}

extern __inline__ void LL_FPU_restore(void)
{
    #ifdef __LINUX__
        __asm__ __volatile__ ("frstor LL_FPU_savearea");
    #else
        __asm__ __volatile__ ("frstor _LL_FPU_savearea");
    #endif
}


/* Read/write helpers for linear memory addresses (peek/poke) */
INLINE_OP void lmempokeb(LIN_ADDR a, BYTE v)
{
        *((BYTE *)a) = v;
}
INLINE_OP void lmempokew(LIN_ADDR a, WORD v)
{
        *((WORD *)a) = v;
}
INLINE_OP void lmempoked(LIN_ADDR a, DWORD v)
{
        *((DWORD *)a) = v;
}

INLINE_OP BYTE lmempeekb(LIN_ADDR a)
{
        return *((BYTE *)a);
}
INLINE_OP WORD lmempeekw(LIN_ADDR a)
{
        return *((WORD *)a);
}
INLINE_OP DWORD lmempeekd(LIN_ADDR a)
{
        return *((DWORD *)a);
}
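
/* Usage sketch: the lmempoke/lmempeek helpers access linear addresses
 * directly; a classic example is the VGA colour text buffer at linear
 * address 0xB8000, where each cell is a character byte followed by an
 * attribute byte.  The function below is hypothetical:
 */
#if 0
static void vga_putc_topleft(BYTE c)
{
    lmempokeb((LIN_ADDR)0xB8000, c);       /* character cell (0,0)     */
    lmempokeb((LIN_ADDR)0xB8001, 0x07);    /* attribute: grey on black */
}
#endif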

END_DEF

#endif