46,30 → 46,30 |
_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ |
_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ |
_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ |
|
|
/*
 * strnlen - length of a NUL-terminated string, capped at @count.
 * Returns the number of characters before the terminator, or @count
 * if no terminator occurs in the first @count bytes.
 */
__kernel_size_t strnlen(const char *s, __kernel_size_t count)
{
	const char *sc;

	/* Advance until the budget runs out or the terminator is hit. */
	for (sc = s; count-- && *sc != '\0'; ++sc)
		/* nothing */;
	return sc - s;
}
|
int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); |
|
struct resource ioport_resource = { |
.name = "PCI IO", |
.start = 0x0000, |
.end = IO_SPACE_LIMIT, |
.flags = IORESOURCE_IO, |
.name = "PCI IO", |
.start = 0x0000, |
.end = IO_SPACE_LIMIT, |
.flags = IORESOURCE_IO, |
}; |
|
struct resource iomem_resource = { |
.name = "PCI mem", |
.start = 0UL, |
.end = ~0UL, |
.flags = IORESOURCE_MEM, |
.name = "PCI mem", |
.start = 0UL, |
.end = ~0UL, |
.flags = IORESOURCE_MEM, |
}; |
|
/* Return the conflict entry if you can't request it */ |
203,7 → 203,6 |
{ |
int err; |
|
|
err = find_resource(root, new, size, min, max, align, alignf, alignf_data); |
if (err >= 0 && __request_resource(root, new)) |
err = -EBUSY; |
212,113 → 211,120 |
} |
|
/* Stub: page-table remapping is a no-op here; always report success. */
int remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	return 0;
}
|
/* Stub: stack dumping is not supported in this environment. */
void dump_stack(void)
{
}

/*
 * Minimal panic(): print the format string via cprintf().
 * NOTE(review): the variadic arguments are ignored, so any "%"
 * specifiers in fmt are emitted literally rather than expanded.
 */
void panic(const char * fmt, ...)
{
	cprintf((char *)(fmt));
}
|
extern void * malloc(size_t size); |
|
/*
 * __kmalloc - kmalloc backend: forward to the C library allocator.
 * @size:  number of bytes to allocate.
 * @flags: GFP flags — ignored, there is only one memory pool here.
 * Returns the new block, or NULL on allocation failure.
 */
void *__kmalloc(size_t size, int flags)
{
	return malloc(size);
}
|
extern void free(void *); |
|
/*
 * kfree - release a block obtained from __kmalloc().
 * The const qualifier is cast away because free() takes void *.
 */
void kfree(const void *ptr)
{
	free((void *)(ptr));
}
|
unsigned long pci_mem_start = 0x10000000; |
|
signed long schedule_timeout(signed long timeout) { |
signed long schedule_timeout(signed long timeout) |
{ |
struct timespec t,s,e; |
|
struct timespec t,s,e; |
jiffies_to_timespec(timeout, &t); |
|
jiffies_to_timespec(timeout, &t); |
|
if (!activeInt && !intr_count) { |
|
nanosleep(&t,NULL); |
|
} else { |
|
ll_gettime(TIME_NEW,&s); |
ADDTIMESPEC(&t,&s,&e); |
|
memory_barrier; |
|
while(TIMESPEC_A_LT_B(&s,&e)) { |
memory_barrier; |
ll_gettime(TIME_NEW,&s); |
} |
|
} |
if (!activeInt && !intr_count) { |
nanosleep(&t,NULL); |
} else { |
ll_gettime(TIME_NEW,&s); |
ADDTIMESPEC(&t,&s,&e); |
|
return 0; |
memory_barrier; |
|
while(TIMESPEC_A_LT_B(&s,&e)) { |
memory_barrier; |
ll_gettime(TIME_NEW,&s); |
} |
} |
|
return 0; |
} |
|
void __const_udelay(unsigned long usecs) { |
void __const_udelay(unsigned long usecs) |
{ |
struct timespec t,s,e; |
|
struct timespec t,s,e; |
if (!activeInt && !intr_count) { |
t.tv_sec = 0; |
t.tv_nsec = usecs * 1000; |
|
if (!activeInt && !intr_count) { |
|
t.tv_sec = 0; |
t.tv_nsec = usecs * 1000; |
|
nanosleep(&t,NULL); |
|
} else { |
nanosleep(&t,NULL); |
} else { |
ll_gettime(TIME_NEW,&e); |
ADDUSEC2TIMESPEC(usecs,&e); |
|
ll_gettime(TIME_NEW,&e); |
ADDUSEC2TIMESPEC(usecs,&e); |
memory_barrier; |
|
memory_barrier; |
|
ll_gettime(TIME_NEW,&s); |
while(TIMESPEC_A_LT_B(&s,&e)) { |
memory_barrier; |
ll_gettime(TIME_NEW,&s); |
} |
|
} |
|
ll_gettime(TIME_NEW,&s); |
while(TIMESPEC_A_LT_B(&s,&e)) { |
memory_barrier; |
ll_gettime(TIME_NEW,&s); |
} |
} |
} |
|
void * vmalloc_32(size_t size); |
|
void *dma_alloc_coherent(struct device *dev, size_t size, |
dma_addr_t *dma_handle, int gfp) |
void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, int gfp) |
{ |
void *ret; |
/* ignore region specifiers */ |
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); |
|
if (dev == NULL || (*dev->dma_mask < 0xffffffff)) |
gfp |= GFP_DMA; |
ret = (void *)vmalloc_32(size); |
|
if (ret != NULL) { |
memset(ret, 0, size); |
*dma_handle = (dma_addr_t)ret; |
} |
return ret; |
void *ret; |
/* ignore region specifiers */ |
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); |
|
if (dev == NULL || (*dev->dma_mask < 0xffffffff)) |
gfp |= GFP_DMA; |
ret = (void *)vmalloc_32(size); |
|
if (ret != NULL) { |
memset(ret, 0, size); |
*dma_handle = (dma_addr_t)ret; |
} |
return ret; |
} |
|
/*
 * dma_alloc_coherent_usb - as dma_alloc_coherent(), but the buffer is
 * page-aligned via vmalloc_32_usb() (required for USB controllers).
 * Returns NULL (and leaves *dma_handle untouched) on failure.
 */
void *dma_alloc_coherent_usb(struct device *dev, size_t size, dma_addr_t *dma_handle, int gfp)
{
	void *ret;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *)vmalloc_32_usb(size);

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = (dma_addr_t)ret;
	}
	return ret;
}
|
/*
 * dma_free_coherent - release a buffer from dma_alloc_coherent().
 * The handle holds the buffer's virtual address, so it is handed
 * straight to vfree().
 */
void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	void *block = (void *)dma_handle;

	vfree(block);
}
|
335,7 → 341,8 |
__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0); |
} |
|
void wait_for_completion(struct completion *x) { |
void wait_for_completion(struct completion *x) |
{ |
spin_lock_irq(&x->wait.lock); |
if (!x->done) { |
DECLARE_WAITQUEUE(wait, current); |
358,99 → 365,122 |
.bus_id = "legacy", |
}; |
|
/* Stub: character-device registration always succeeds. */
int register_chrdev(unsigned int a, const char *b, struct file_operations *c)
{
	return 0;
}
|
/* Stub: character-device unregistration always succeeds. */
int unregister_chrdev(unsigned int a, const char *b)
{
	return 0;
}
|
/*
 * __ioremap - identity mapping: physical I/O addresses are usable
 * directly in this environment, so just hand back the offset.
 * @size and @flags are accepted for API compatibility and ignored.
 */
void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags)
{
	return (void *)offset;
}

/* Nothing to undo for the identity mapping above. */
void iounmap(void *addr)
{
}
|
void iounmap(void *addr) { |
loff_t no_llseek(struct file *file, loff_t offset, int origin) |
{ |
return 0; |
} |
|
void *vmalloc(unsigned long size) |
{ |
return malloc(size); |
} |
|
loff_t no_llseek(struct file *file, loff_t offset, int origin) { |
void *kern_alloc_aligned(size_t size, DWORD flags, int align_bits, DWORD align_ofs); |
|
return 0; |
void * vmalloc_32(size_t size) |
{ |
void *mem; |
unsigned long diff; |
|
} |
mem = malloc(size+12); |
|
void *vmalloc(unsigned long size) { |
diff = (unsigned long)((((unsigned long)mem/4)+1)*4-(unsigned long)mem); |
|
return malloc(size); |
*(unsigned long *)(mem+diff) = (diff | 0x80000000); |
|
return (mem+diff+4); |
} |
|
/*
 * vfree - release memory obtained from vmalloc()/vmalloc_32().
 * vmalloc_32() blocks carry a header word (offset | 0x80000000) in the
 * 4 bytes below the returned pointer; when present, it is used to
 * recover and free the raw malloc() block.  Otherwise the pointer is
 * freed as-is.  A zero header marks an already-freed block.
 * NOTE(review): for plain vmalloc()/malloc() pointers the header read
 * at addr-4 touches bytes outside the allocation — confirm callers
 * only pass vmalloc_32()-style blocks here.
 */
void vfree(void *addr)
{
	unsigned long hdr;

	if (addr == NULL || *(unsigned long *)((char *)addr-4) == 0)
		return;

	hdr = *(unsigned long *)((char *)addr-4);
	if ((hdr & 0x80000000) == 0x80000000) {
		/*
		 * Clear the header BEFORE freeing: the original cleared it
		 * after free(), writing into freed memory.
		 */
		*(unsigned long *)((char *)addr-4) = 0;
		free((char *)addr-(hdr & 0x7FFFFFFF)-4);
		return;
	}

	free(addr);

	return;
}
|
/*
 * vmalloc_32_usb - allocate @size bytes aligned to a 4096-byte page.
 * Over-allocates two pages and stores (diff | 0x80000000) at the page
 * boundary preceding the returned block.  Returns NULL on failure.
 * NOTE(review): the header lies 4096 bytes (not 4) below the returned
 * pointer, so vfree() — which reads addr-4 — cannot release these
 * blocks; confirm how USB buffers are actually freed.
 */
void * vmalloc_32_usb(size_t size)
{
	void* mem;
	unsigned long diff;

	mem = malloc( size + 2 * 4096 );

	if (! mem)
		return NULL;

	diff = 4096 - (((unsigned long) mem) % 4096);
	*(unsigned long *)((char *)mem+diff) = (diff | 0x80000000);

	return (void*)((char *)mem + diff + 4096);
}
|
/* TODO */ |
char * strsep(char **a,const char *b) |
{ |
return NULL; |
} |
|
struct screen_info screen_info; |
|
int linuxcomp_setfd(struct inode *i, int i_rdev) { |
int linuxcomp_setfd(struct inode *i, int i_rdev) |
{ |
i->i_rdev = i_rdev; |
|
i->i_rdev = i_rdev; |
return 0; |
} |
|
/* One-time init hook for the compatibility layer; nothing to set up. */
int linuxcomp_init(void)
{
	return 0;
}

struct page *mem_map = 0x0000;	/* no global page array in this environment */

/* Stub: deferred work is never queued; report success. */
int schedule_work(struct work_struct *work)
{
	return 0;
}

/* Stub: signal delivery is always "allowed". */
int allow_signal(int sig)
{
	return 0;
}

/* Remaining kernel-API stubs: all no-ops. */
void flush_scheduled_work(void) { }
void daemonize(const char *name, ...) { }
void yield(void) { }

void do_exit(long code) { }
|
void complete_and_exit(struct completion *comp, long code) |
{ |
if (comp) |
464,7 → 494,6 |
return __ioremap(offset, size, 0); |
} |
|
|
#define NULL_TIMESPEC(t) ((t)->tv_sec = (t)->tv_nsec = 0) |
|
int wait_ms26(unsigned long msec) |
480,4 → 509,3 |
|
return 0; |
} |
|