/* (diff hunk marker residue: 322,13 -> 322,36 — leftover from a merged patch) */
vfree((void *)dma_handle); |
} |
|
extern void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync); |
|
void init_completion(struct completion *x) { |
x->done = 0; |
init_waitqueue_head(&x->wait); |
} |
|
void complete(struct completion *c) { |
void complete(struct completion *x) |
{ |
x->done++; |
__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0); |
} |
|
void wait_for_completion(struct completion *c) { |
void wait_for_completion(struct completion *x) { |
spin_lock_irq(&x->wait.lock); |
if (!x->done) { |
DECLARE_WAITQUEUE(wait, current); |
|
wait.flags |= WQ_FLAG_EXCLUSIVE; |
__add_wait_queue_tail(&x->wait, &wait); |
do { |
__set_current_state(TASK_UNINTERRUPTIBLE); |
spin_unlock_irq(&x->wait.lock); |
schedule(); |
spin_lock_irq(&x->wait.lock); |
} while (!x->done); |
__remove_wait_queue(&x->wait, &wait); |
} |
x->done--; |
spin_unlock_irq(&x->wait.lock); |
} |
|
struct device legacy_bus = { |
365,35 → 388,25 |
|
} |
|
void *kern_alloc_aligned(size_t size, DWORD flags, |
int align_bits, DWORD align_ofs); |
|
/*
 * vmalloc_32() shim: allocate 'size' bytes of ordinary heap memory.
 *
 * Fix: a botched patch merge left BOTH implementations in the body —
 * the old malloc()+alignment-header code returned first, making the
 * new kern_alloc_aligned() path dead code.  Keep the new path (the
 * kern_alloc_aligned() prototype was added by the same patch).
 *
 * NOTE(review): vfree() below still decodes a 4-byte alignment header
 * stored immediately before the returned pointer — confirm that
 * kern_alloc_aligned(size, 0, 12, 0) produces a block layout that
 * vfree() can free correctly.
 */
void *vmalloc_32(size_t size)
{
	return kern_alloc_aligned(size, 0, 12, 0);
}
|
/*
 * Free a block obtained from the aligned allocator.
 *
 * Layout: a 4-byte header sits immediately before the returned
 * pointer.  Top bit set marks a live aligned block; the low 31 bits
 * hold the distance back to the start of the raw allocation.  A zero
 * header marks an already-freed block (the header is cleared on
 * free), turning double frees into no-ops.
 *
 * Fixes from a botched patch merge:
 *  - a stray unconditional free(addr) executed BEFORE the header was
 *    examined, so the header reads that followed were use-after-free
 *    and the real free below was a double free;
 *  - the header marker was cleared AFTER free(): the header lives
 *    inside the raw block, so that write also landed in freed memory.
 *    Clear the marker first, then free.
 */
void vfree(void *addr)
{
	unsigned long hdr;

	if (addr == NULL)
		return;

	hdr = *(unsigned long *)((char *)addr - 4);
	if (hdr == 0)
		return;		/* already freed */

	if ((hdr & 0x80000000) == 0x80000000) {
		/* clear the live marker before the block goes away */
		*(unsigned long *)((char *)addr - 4) = 0;
		free((char *)addr - (hdr & 0x7FFFFFFF) - 4);
		return;
	}

	free(addr);
}
|
/* TODO */ |
/* (diff hunk marker residue: 418,3 -> 431,43 — leftover from a merged patch) */
return 0; |
|
} |
|
/* Dummy mem_map: this shim provides no page array, so it stays NULL. */
struct page *mem_map = NULL;
|
/*
 * Work-queue / process-management stubs: this environment has no
 * kernel scheduler, so these 2.6 APIs are satisfied with no-ops.
 */
int schedule_work(struct work_struct *work)
{
	return 0;	/* report the work as queued */
}

void flush_scheduled_work(void)
{
}

void daemonize(const char *name, ...)
{
}

int allow_signal(int sig)
{
	return 0;
}

void yield(void)
{
}
|
/* Process-exit stub: nothing to tear down in this environment. */
void do_exit(long code)
{
}

/*
 * Signal the given completion (when non-NULL), then terminate the
 * current thread of execution via do_exit().
 */
void complete_and_exit(struct completion *comp, long code)
{
	if (comp != NULL)
		complete(comp);

	do_exit(code);
}
|
/*
 * Map a physical range uncached: thin forwarder to __ioremap() with
 * flags = 0.
 * NOTE(review): plain `inline` at file scope has C99 linkage quirks —
 * confirm the build uses GNU89 inline semantics or consider `static`.
 */
inline void *ioremap_nocache(unsigned long offset, unsigned long size)
{
	return __ioremap(offset, size, 0);
}
|
|
#define NULL_TIMESPEC(t) ((t)->tv_sec = (t)->tv_nsec = 0) |
|
/*
 * Sleep for approximately msec milliseconds (2.6-style msleep shim).
 * Always returns 0; an interrupted nanosleep() is not retried, so the
 * actual sleep may be shorter than requested.
 */
int wait_ms26(unsigned long msec)
{
	struct timespec req, rem;

	req.tv_sec = msec / 1000;
	req.tv_nsec = (long)((msec % 1000) * 1000000);
	rem.tv_sec = 0;
	rem.tv_nsec = 0;

	nanosleep(&req, &rem);

	return 0;
}
|