/shark/trunk/drivers/linuxc26/include/linux/pci.h |
---|
1,5 → 1,5 |
/* |
* $Id: pci.h,v 1.3 2004-02-20 11:35:57 giacomo Exp $ |
* $Id: pci.h,v 1.4 2006-07-04 15:21:01 mauro Exp $ |
* |
* PCI defines and function prototypes |
* Copyright 1994, Drew Eckhardt |
691,6 → 691,8 |
void *pci_pool_alloc (struct pci_pool *pool, int flags, dma_addr_t *handle); |
void pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t addr); |
void *pci_pool_alloc_usb (struct pci_pool *pool, int flags, dma_addr_t *handle); |
#if defined(CONFIG_ISA) || defined(CONFIG_EISA) |
extern struct pci_dev *isa_bridge; |
#endif |
/shark/trunk/drivers/linuxc26/include/linux/vmalloc.h |
---|
24,8 → 24,10 |
*/ |
extern void *vmalloc(unsigned long size); |
extern void *vmalloc_32(unsigned long size); |
extern void *vmalloc_32_usb(unsigned long size); |
extern void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot); |
extern void vfree(void *addr); |
extern void vfree_usb(void *addr); |
extern void *vmap(struct page **pages, unsigned int count, |
unsigned long flags, pgprot_t prot); |
/shark/trunk/drivers/linuxc26/include/asm/dma-mapping.h |
---|
9,6 → 9,9 |
void *dma_alloc_coherent(struct device *dev, size_t size, |
dma_addr_t *dma_handle, int flag); |
void *dma_alloc_coherent_usb(struct device *dev, size_t size, |
dma_addr_t *dma_handle, int flag); |
void dma_free_coherent(struct device *dev, size_t size, |
void *vaddr, dma_addr_t dma_handle); |
/shark/trunk/drivers/linuxc26/include/asm-generic/pci-dma-compat.h |
---|
22,6 → 22,13 |
return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC); |
} |
static inline void * |
pci_alloc_consistent_usb(struct pci_dev *hwdev, size_t size, |
dma_addr_t *dma_handle) |
{ |
return dma_alloc_coherent_usb(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC); |
} |
static inline void |
pci_free_consistent(struct pci_dev *hwdev, size_t size, |
void *vaddr, dma_addr_t dma_handle) |
/shark/trunk/drivers/linuxc26/videodev.c |
---|
94,7 → 94,7 |
* Active devices |
*/ |
static struct video_device *video_device[VIDEO_NUM_DEVICES]; |
/*static*/ struct video_device *video_device[VIDEO_NUM_DEVICES]; |
static DECLARE_MUTEX(videodev_lock); |
struct video_device* video_devdata(struct file *file) |
/shark/trunk/drivers/linuxc26/linuxcomp.c |
---|
203,7 → 203,6 |
{ |
int err; |
err = find_resource(root, new, size, min, max, align, alignf, alignf_data); |
if (err >= 0 && __request_resource(root, new)) |
err = -EBUSY; |
212,46 → 211,45 |
} |
int remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot) |
{ return 0; } |
{ |
return 0; |
} |
void dump_stack(void) { } |
void dump_stack(void) |
{ |
void panic(const char * fmt, ...) { |
} |
/*
 * panic - minimal stand-in for the Linux kernel panic().
 * Prints the message via cprintf() and simply returns instead of
 * halting the system.
 * NOTE(review): the variadic arguments are ignored, and fmt is handed
 * to cprintf() as the format string itself -- a '%' inside the message
 * would be misinterpreted. If cprintf() is printf-like, consider
 * cprintf("%s", fmt); confirm cprintf's signature before changing.
 */
void panic(const char * fmt, ...)
{
cprintf((char *)(fmt));
}
extern void * malloc(size_t size); |
void *__kmalloc(size_t size, int flags) { |
/*
 * __kmalloc - backend for the Linux kmalloc() API, mapped straight
 * onto the C library allocator. The GFP allocation-context flags have
 * no meaning in this port and are ignored.
 */
void *__kmalloc(size_t size, int flags)
{
	(void)flags;            /* no GFP semantics here */
	return malloc(size);
}
extern void free(void *); |
void kfree(const void *ptr) { |
/*
 * kfree - release memory obtained through __kmalloc().
 * Takes a const-qualified pointer to match the Linux prototype; the
 * qualifier is cast away before the block is handed back to free().
 */
void kfree(const void *ptr)
{
	void *block = (void *)ptr;
	free(block);
}
unsigned long pci_mem_start = 0x10000000; |
signed long schedule_timeout(signed long timeout) { |
signed long schedule_timeout(signed long timeout) |
{ |
struct timespec t,s,e; |
jiffies_to_timespec(timeout, &t); |
if (!activeInt && !intr_count) { |
nanosleep(&t,NULL); |
} else { |
ll_gettime(TIME_NEW,&s); |
ADDTIMESPEC(&t,&s,&e); |
261,26 → 259,21 |
memory_barrier; |
ll_gettime(TIME_NEW,&s); |
} |
} |
return 0; |
} |
void __const_udelay(unsigned long usecs) { |
void __const_udelay(unsigned long usecs) |
{ |
struct timespec t,s,e; |
if (!activeInt && !intr_count) { |
t.tv_sec = 0; |
t.tv_nsec = usecs * 1000; |
nanosleep(&t,NULL); |
} else { |
ll_gettime(TIME_NEW,&e); |
ADDUSEC2TIMESPEC(usecs,&e); |
291,15 → 284,12 |
memory_barrier; |
ll_gettime(TIME_NEW,&s); |
} |
} |
} |
void * vmalloc_32(size_t size); |
void *dma_alloc_coherent(struct device *dev, size_t size, |
dma_addr_t *dma_handle, int gfp) |
void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, int gfp) |
{ |
void *ret; |
/* ignore region specifiers */ |
316,9 → 306,25 |
return ret; |
} |
void dma_free_coherent(struct device *dev, size_t size, |
void *vaddr, dma_addr_t dma_handle) |
/*
 * dma_alloc_coherent_usb - USB-specific variant of dma_alloc_coherent().
 * Backs the "coherent" buffer with vmalloc_32_usb(), which hands back
 * page-aligned memory. The DMA handle published through *dma_handle is
 * the virtual address itself, i.e. this port assumes the buffer's bus
 * address equals its virtual address (identity mapping).
 * Returns the zeroed buffer, or NULL on allocation failure.
 */
void *dma_alloc_coherent_usb(struct device *dev, size_t size, dma_addr_t *dma_handle, int gfp)
{
void *ret;
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
/* keep the Linux gfp adjustment for fidelity, although
   vmalloc_32_usb() below never looks at gfp */
if (dev == NULL || (*dev->dma_mask < 0xffffffff))
gfp |= GFP_DMA;
ret = (void *)vmalloc_32_usb(size);
if (ret != NULL) {
/* zero the buffer and report its (identity-mapped) bus address */
memset(ret, 0, size);
*dma_handle = (dma_addr_t)ret;
}
return ret;
}
/*
 * dma_free_coherent - release a buffer from dma_alloc_coherent().
 * Frees through the DMA handle; in this port the allocators store the
 * virtual address in the handle, so handle and vaddr are the same value.
 * NOTE(review): buffers obtained from dma_alloc_coherent_usb() keep
 * their bookkeeping word a full page below the returned pointer, while
 * vfree() reads it at addr-4; a separate vfree_usb() exists -- verify
 * that USB buffers are never routed through this function.
 */
void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
vfree((void *)dma_handle);
}
335,7 → 341,8 |
__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0); |
} |
void wait_for_completion(struct completion *x) { |
void wait_for_completion(struct completion *x) |
{ |
spin_lock_irq(&x->wait.lock); |
if (!x->done) { |
DECLARE_WAITQUEUE(wait, current); |
358,36 → 365,38 |
.bus_id = "legacy", |
}; |
int register_chrdev(unsigned int a, const char *b, struct file_operations *c) { |
/*
 * register_chrdev - stub: character-device registration is not needed
 * in this environment, so every request trivially succeeds.
 */
int register_chrdev(unsigned int a, const char *b, struct file_operations *c)
{
	(void)a;
	(void)b;
	(void)c;
	return 0;
}
int unregister_chrdev(unsigned int a, const char *b) { |
/*
 * unregister_chrdev - stub counterpart of register_chrdev();
 * always reports success.
 */
int unregister_chrdev(unsigned int a, const char *b)
{
	(void)a;
	(void)b;
	return 0;
}
void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags) { |
/*
 * __ioremap - identity mapping: physical MMIO addresses are directly
 * usable in this flat address space, so the offset is returned as a
 * pointer unchanged; size and flags are irrelevant.
 */
void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags)
{
	(void)size;
	(void)flags;
	return (void *)offset;
}
void iounmap(void *addr) { |
/* iounmap - nothing to undo: __ioremap() creates no real mapping. */
void iounmap(void *addr)
{
	(void)addr;
}
loff_t no_llseek(struct file *file, loff_t offset, int origin) { |
/*
 * no_llseek - seek handler for non-seekable devices.
 * NOTE(review): mainline Linux returns -ESPIPE here; this port reports
 * position 0 / success instead, which local callers apparently accept.
 */
loff_t no_llseek(struct file *file, loff_t offset, int origin)
{
	(void)file;
	(void)origin;
	return 0;
}
void *vmalloc(unsigned long size) { |
/*
 * vmalloc - virtually-contiguous allocation; with no paging distinction
 * in this port it degenerates to a plain heap allocation.
 */
void *vmalloc(unsigned long size)
{
	void *area = malloc(size);
	return area;
}
void *kern_alloc_aligned(size_t size, DWORD flags, int align_bits, DWORD align_ofs); |
void * vmalloc_32(size_t size) |
{ |
void *mem; |
400,13 → 409,13 |
*(unsigned long *)(mem+diff) = (diff | 0x80000000); |
return (mem+diff+4); |
} |
void vfree(void *addr) { |
void vfree(void *addr) |
{ |
if (addr == NULL || *(unsigned long *)(addr-4) == 0) |
return; |
if (addr == NULL || *(unsigned long *)(addr-4) == 0) return; |
if ((*(unsigned long *)(addr-4) & 0x80000000) == 0x80000000) { |
free(addr-(*(unsigned long *)(addr-4) & 0x7FFFFFFF)-4); |
*(unsigned long *)(addr-4) = 0; |
416,41 → 425,62 |
free(addr); |
return; |
} |
/* TODO */ |
char * strsep(char **a,const char *b) { |
/*
 * vmalloc_32_usb - page-aligned allocation used by the USB subsystem.
 *
 * Over-allocates by two 4 KiB pages, then returns an address one full
 * page past the first 4096-byte boundary inside the raw block.  The
 * distance from the raw malloc() pointer to that boundary, tagged with
 * bit 31, is recorded at the boundary itself -- i.e. 4096 bytes below
 * the returned address -- so the matching free routine can recover the
 * original pointer.  Returns NULL when the underlying malloc() fails.
 */
void * vmalloc_32_usb(size_t size)
{
	char *raw;
	unsigned long skew;

	raw = malloc(size + 2 * 4096);
	if (raw == NULL)
		return NULL;
	skew = 4096 - ((unsigned long)raw % 4096);   /* skew in (0, 4096] */
	*(unsigned long *)(raw + skew) = skew | 0x80000000;
	return raw + skew + 4096;
}
/*
 * strsep - extract the next token from the string at *a.
 *
 * Scans *a for the first occurrence of any delimiter character from b,
 * overwrites it with '\0', advances *a past it, and returns the start
 * of the token.  When no delimiter remains, the whole rest of the
 * string is returned and *a is set to NULL; when *a is already NULL,
 * NULL is returned.  Empty fields yield empty tokens, matching the
 * BSD/POSIX strsep() contract (unlike strtok).
 *
 * Replaces the previous /* TODO *​/ stub that always returned NULL and
 * silently broke every caller that tokenizes option strings.
 */
char * strsep(char **a, const char *b)
{
	char *token = *a;
	char *p;

	if (token == NULL)
		return NULL;

	for (p = token; *p != '\0'; p++) {
		const char *d;
		for (d = b; *d != '\0'; d++) {
			if (*p == *d) {
				*p = '\0';      /* terminate the token */
				*a = p + 1;     /* resume after the delimiter */
				return token;
			}
		}
	}

	/* no delimiter found: this was the final token */
	*a = NULL;
	return token;
}
struct screen_info screen_info; |
int linuxcomp_setfd(struct inode *i, int i_rdev) { |
/*
 * linuxcomp_setfd - store a device number in an inode.
 * Copies i_rdev into the inode and always returns 0; presumably used
 * by the glue code to hand-build inodes for the ported drivers --
 * callers are not visible from here.
 */
int linuxcomp_setfd(struct inode *i, int i_rdev)
{
i->i_rdev = i_rdev;
return 0;
}
/*
 * linuxcomp_init - initialize the Linux 2.6 compatibility layer.
 * Nothing needs setting up at present; always succeeds.
 */
int linuxcomp_init(void)
{
	return 0;
}
int linuxcomp_init(void) { |
struct page *mem_map = 0x0000; |
/*
 * schedule_work - stub: no workqueue threads exist in this port,
 * so the request is accepted and dropped.
 */
int schedule_work(struct work_struct *work)
{
	(void)work;
	return 0;
}
/*
 * allow_signal - stub: per-thread signal delivery control is not
 * supported here; report success unconditionally.
 */
int allow_signal(int sig)
{
	(void)sig;
	return 0;
}
struct page *mem_map = 0x0000; |
int schedule_work(struct work_struct *work) { return 0;} |
/* flush_scheduled_work - no deferred work queues exist; nothing to flush. */
void flush_scheduled_work(void)
{
}
/* daemonize - kernel-thread daemonization is meaningless in this port; no-op. */
void daemonize(const char *name, ...)
{
	(void)name;
}
int allow_signal(int sig) {return 0; } |
/* yield - CPU yielding is left to the native scheduler; no-op here. */
void yield(void)
{
}
/* do_exit - thread termination is not modelled; the exit code is discarded. */
void do_exit(long code)
{
	(void)code;
}
void complete_and_exit(struct completion *comp, long code) |
{ |
if (comp) |
464,7 → 494,6 |
return __ioremap(offset, size, 0); |
} |
#define NULL_TIMESPEC(t) ((t)->tv_sec = (t)->tv_nsec = 0) |
int wait_ms26(unsigned long msec) |
480,4 → 509,3 |
return 0; |
} |
/shark/trunk/drivers/pci/pool.c |
---|
186,7 → 186,34 |
return page; |
} |
/*
 * pool_alloc_page_usb - grow a pci_pool by one page, USB variant.
 * Identical in structure to pool_alloc_page() but obtains backing
 * memory through pci_alloc_consistent_usb(). Allocates the pci_page
 * descriptor together with its free-block bitmap (one bit per block,
 * rounded up to whole longs), marks every block free, and links the
 * page into the pool. Returns the new page, or 0 on failure.
 * Caller context: invoked with pool->lock held by pci_pool_alloc_usb().
 */
static struct pci_page *
pool_alloc_page_usb (struct pci_pool *pool, int mem_flags)
{
struct pci_page *page;
int mapsize;
/* bitmap size: one bit per block, rounded up to whole longs */
mapsize = pool->blocks_per_page;
mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
mapsize *= sizeof (long);
/* descriptor and bitmap live in one kmalloc'd chunk */
page = (struct pci_page *) kmalloc (mapsize + sizeof *page, mem_flags);
if (!page)
return 0;
page->vaddr = pci_alloc_consistent_usb (pool->dev, pool->allocation, &page->dma);
if (page->vaddr) {
memset (page->bitmap, 0xff, mapsize); // bit set == free
#ifdef CONFIG_DEBUG_SLAB
memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
list_add (&page->page_list, &pool->page_list);
page->in_use = 0;
} else {
/* backing allocation failed: drop the descriptor too */
kfree (page);
page = 0;
}
return page;
}
static inline int |
is_page_busy (int blocks, unsigned long *bitmap) |
{ |
211,7 → 238,6 |
kfree (page); |
} |
/** |
* pci_pool_destroy - destroys a pool of pci memory blocks. |
* @pool: pci pool that will be destroyed |
247,7 → 273,6 |
kfree (pool); |
} |
/** |
* pci_pool_alloc - get a block of consistent memory |
* @pool: pci pool that will produce the block |
319,7 → 344,77 |
return retval; |
} |
/**
 * pci_pool_alloc_usb - get a block of consistent memory
 * @pool: pci pool that will produce the block
 * @mem_flags: SLAB_KERNEL or SLAB_ATOMIC
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 *
 * USB variant of pci_pool_alloc(): identical logic except that new
 * pages come from pool_alloc_page_usb().
 */
void *
pci_pool_alloc_usb (struct pci_pool *pool, int mem_flags, dma_addr_t *handle)
{
unsigned long flags;
struct list_head *entry;
struct pci_page *page;
int map, block;
size_t offset;
void *retval;
restart:
spin_lock_irqsave (&pool->lock, flags);
/* first pass: look for a free block on any existing page */
list_for_each (entry, &pool->page_list) {
int i;
page = list_entry (entry, struct pci_page, page_list);
/* only cachable accesses here ... */
for (map = 0, i = 0;
i < pool->blocks_per_page;
i += BITS_PER_LONG, map++) {
if (page->bitmap [map] == 0)
continue;
/* a set bit marks a free block; ffz(~x) finds the first set bit */
block = ffz (~ page->bitmap [map]);
if ((i + block) < pool->blocks_per_page) {
/* claim the block and compute its byte offset in the page */
clear_bit (block, &page->bitmap [map]);
offset = (BITS_PER_LONG * map) + block;
offset *= pool->size;
goto ready;
}
}
}
/* no free block anywhere: try to grow the pool by one page */
if (!(page = pool_alloc_page_usb (pool, SLAB_ATOMIC))) {
if (mem_flags == SLAB_KERNEL) {
/* blocking caller: wait for a block to be freed, then rescan.
   NOTE(review): the TASK_INTERRUPTIBLE state-set is commented
   out; the port's schedule_timeout() sleeps on its own, but
   confirm the wakeup path still works as intended. */
DECLARE_WAITQUEUE (wait, current);
//current->state = TASK_INTERRUPTIBLE;
add_wait_queue (&pool->waitq, &wait);
spin_unlock_irqrestore (&pool->lock, flags);
schedule_timeout (POOL_TIMEOUT_JIFFIES);
remove_wait_queue (&pool->waitq, &wait);
goto restart;
}
/* atomic caller: report failure */
retval = 0;
goto done;
}
/* fresh page: hand out its first block */
clear_bit (0, &page->bitmap [0]);
offset = 0;
ready:
page->in_use++;
retval = offset + page->vaddr;
*handle = offset + page->dma;
#ifdef CONFIG_DEBUG_SLAB
memset (retval, POOL_POISON_ALLOCATED, pool->size);
#endif
done:
spin_unlock_irqrestore (&pool->lock, flags);
return retval;
}
static struct pci_page * |
pool_find_page (struct pci_pool *pool, dma_addr_t dma) |
{ |
341,7 → 436,6 |
return page; |
} |
/** |
* pci_pool_free - put block back into pci pool |
* @pool: the pci pool holding the block |
399,7 → 493,6 |
spin_unlock_irqrestore (&pool->lock, flags); |
} |
EXPORT_SYMBOL (pci_pool_create); |
EXPORT_SYMBOL (pci_pool_destroy); |
EXPORT_SYMBOL (pci_pool_alloc); |