Subversion Repositories shark

Compare Revisions

Ignore whitespace Rev 1046 → Rev 1047

/shark/trunk/drivers/linuxc26/include/linux/pci.h
1,5 → 1,5
/*
* $Id: pci.h,v 1.3 2004-02-20 11:35:57 giacomo Exp $
* $Id: pci.h,v 1.4 2006-07-04 15:21:01 mauro Exp $
*
* PCI defines and function prototypes
* Copyright 1994, Drew Eckhardt
691,6 → 691,8
void *pci_pool_alloc (struct pci_pool *pool, int flags, dma_addr_t *handle);
void pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t addr);
 
void *pci_pool_alloc_usb (struct pci_pool *pool, int flags, dma_addr_t *handle);
 
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
extern struct pci_dev *isa_bridge;
#endif
/shark/trunk/drivers/linuxc26/include/linux/vmalloc.h
24,8 → 24,10
*/
extern void *vmalloc(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_usb(unsigned long size);
extern void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot);
extern void vfree(void *addr);
extern void vfree_usb(void *addr);
 
extern void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
/shark/trunk/drivers/linuxc26/include/asm/dma-mapping.h
7,8 → 7,11
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int flag);
dma_addr_t *dma_handle, int flag);
 
void *dma_alloc_coherent_usb(struct device *dev, size_t size,
dma_addr_t *dma_handle, int flag);
 
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
 
/shark/trunk/drivers/linuxc26/include/asm-generic/pci-dma-compat.h
22,6 → 22,13
return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
}
 
/* Allocate 'size' bytes of consistent (coherent) memory for a USB PCI
 * device.  Thin PCI-to-generic-DMA compat shim: forwards to
 * dma_alloc_coherent_usb() with GFP_ATOMIC, passing a NULL device when
 * hwdev is NULL.  Returns the kernel virtual address and writes the bus
 * address through *dma_handle (NULL on failure, per the callee). */
static inline void *
pci_alloc_consistent_usb(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
return dma_alloc_coherent_usb(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
}
 
static inline void
pci_free_consistent(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
/shark/trunk/drivers/linuxc26/videodev.c
94,7 → 94,7
* Active devices
*/
static struct video_device *video_device[VIDEO_NUM_DEVICES];
/*static*/ struct video_device *video_device[VIDEO_NUM_DEVICES];
static DECLARE_MUTEX(videodev_lock);
 
struct video_device* video_devdata(struct file *file)
/shark/trunk/drivers/linuxc26/linuxcomp.c
46,30 → 46,30
_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
 
__kernel_size_t strnlen(const char *s, __kernel_size_t count)
{
const char *sc;
for (sc = s; count-- && *sc != '\0'; ++sc)
const char *sc;
 
for (sc = s; count-- && *sc != '\0'; ++sc)
/* nothing */;
return sc - s;
return sc - s;
}
 
int nanosleep(const struct timespec *rqtp, struct timespec *rmtp);
 
struct resource ioport_resource = {
.name = "PCI IO",
.start = 0x0000,
.end = IO_SPACE_LIMIT,
.flags = IORESOURCE_IO,
.name = "PCI IO",
.start = 0x0000,
.end = IO_SPACE_LIMIT,
.flags = IORESOURCE_IO,
};
 
struct resource iomem_resource = {
.name = "PCI mem",
.start = 0UL,
.end = ~0UL,
.flags = IORESOURCE_MEM,
.name = "PCI mem",
.start = 0UL,
.end = ~0UL,
.flags = IORESOURCE_MEM,
};
 
/* Return the conflict entry if you can't request it */
203,7 → 203,6
{
int err;
 
err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
if (err >= 0 && __request_resource(root, new))
err = -EBUSY;
212,113 → 211,120
}
 
int remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
{ return 0; }
{
return 0;
}
 
void dump_stack(void) { }
void dump_stack(void)
{
 
void panic(const char * fmt, ...) {
}
 
cprintf((char *)(fmt));
 
/* Minimal panic() replacement for the S.Ha.R.K. Linux compat layer:
 * prints the message via cprintf() and simply returns (it does not halt
 * the system).
 * NOTE(review): the variadic arguments are ignored and fmt is handed to
 * cprintf() as-is, so any %-specifiers in fmt will not be expanded with
 * the caller's values -- verify callers pass plain strings. */
void panic(const char * fmt, ...)
{
cprintf((char *)(fmt));
}
 
extern void * malloc(size_t size);
 
void *__kmalloc(size_t size, int flags) {
 
return malloc(size);
 
/* kmalloc() backend: the compat layer maps kernel allocations straight
 * onto the C library allocator.  The gfp 'flags' argument is ignored
 * (no DMA/atomic zones here).  Returns NULL on allocation failure, as
 * malloc() does. */
void *__kmalloc(size_t size, int flags)
{
return malloc(size);
}
 
extern void free(void *);
 
void kfree(const void *ptr) {
 
free((void *)(ptr));
 
/* Release memory obtained from __kmalloc().  The const qualifier is
 * cast away only to match free()'s prototype, mirroring the Linux
 * kfree() signature. */
void kfree(const void *ptr)
{
free((void *)(ptr));
}
 
unsigned long pci_mem_start = 0x10000000;
 
signed long schedule_timeout(signed long timeout) {
signed long schedule_timeout(signed long timeout)
{
struct timespec t,s,e;
 
struct timespec t,s,e;
jiffies_to_timespec(timeout, &t);
 
jiffies_to_timespec(timeout, &t);
if (!activeInt && !intr_count) {
nanosleep(&t,NULL);
} else {
ll_gettime(TIME_NEW,&s);
ADDTIMESPEC(&t,&s,&e);
memory_barrier;
while(TIMESPEC_A_LT_B(&s,&e)) {
memory_barrier;
ll_gettime(TIME_NEW,&s);
}
}
if (!activeInt && !intr_count) {
nanosleep(&t,NULL);
} else {
ll_gettime(TIME_NEW,&s);
ADDTIMESPEC(&t,&s,&e);
 
return 0;
memory_barrier;
 
while(TIMESPEC_A_LT_B(&s,&e)) {
memory_barrier;
ll_gettime(TIME_NEW,&s);
}
}
 
return 0;
}
 
void __const_udelay(unsigned long usecs) {
void __const_udelay(unsigned long usecs)
{
struct timespec t,s,e;
 
struct timespec t,s,e;
if (!activeInt && !intr_count) {
t.tv_sec = 0;
t.tv_nsec = usecs * 1000;
 
if (!activeInt && !intr_count) {
t.tv_sec = 0;
t.tv_nsec = usecs * 1000;
nanosleep(&t,NULL);
} else {
nanosleep(&t,NULL);
} else {
ll_gettime(TIME_NEW,&e);
ADDUSEC2TIMESPEC(usecs,&e);
 
ll_gettime(TIME_NEW,&e);
ADDUSEC2TIMESPEC(usecs,&e);
memory_barrier;
 
memory_barrier;
 
ll_gettime(TIME_NEW,&s);
while(TIMESPEC_A_LT_B(&s,&e)) {
memory_barrier;
ll_gettime(TIME_NEW,&s);
}
 
}
ll_gettime(TIME_NEW,&s);
while(TIMESPEC_A_LT_B(&s,&e)) {
memory_barrier;
ll_gettime(TIME_NEW,&s);
}
}
}
 
void * vmalloc_32(size_t size);
 
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int gfp)
void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, int gfp)
{
void *ret;
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
if (dev == NULL || (*dev->dma_mask < 0xffffffff))
gfp |= GFP_DMA;
ret = (void *)vmalloc_32(size);
if (ret != NULL) {
memset(ret, 0, size);
*dma_handle = (dma_addr_t)ret;
}
return ret;
void *ret;
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 
if (dev == NULL || (*dev->dma_mask < 0xffffffff))
gfp |= GFP_DMA;
ret = (void *)vmalloc_32(size);
 
if (ret != NULL) {
memset(ret, 0, size);
*dma_handle = (dma_addr_t)ret;
}
return ret;
}
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
 
/* USB variant of dma_alloc_coherent(): allocates zeroed memory through
 * vmalloc_32_usb() (the page-aligned allocator) and reports the virtual
 * address itself as the bus handle -- this compat layer assumes an
 * identity physical/virtual DMA mapping.  Returns NULL (and leaves
 * *dma_handle untouched) if the allocation fails. */
void *dma_alloc_coherent_usb(struct device *dev, size_t size, dma_addr_t *dma_handle, int gfp)
{
void *ret;
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

if (dev == NULL || (*dev->dma_mask < 0xffffffff))
gfp |= GFP_DMA; /* note: gfp is not consulted below; kept for parity with dma_alloc_coherent */
ret = (void *)vmalloc_32_usb(size);

if (ret != NULL) {
memset(ret, 0, size);
*dma_handle = (dma_addr_t)ret;
}
return ret;
}
 
/* Free memory returned by dma_alloc_coherent().  Since that allocator
 * stores the virtual address in the handle, freeing via dma_handle is
 * equivalent to freeing vaddr here.
 * NOTE(review): buffers from dma_alloc_coherent_usb() come from
 * vmalloc_32_usb(), whose hidden-header layout differs from
 * vmalloc_32()'s, so plain vfree() may not locate their tag word; a
 * vfree_usb() is declared in vmalloc.h -- confirm which free path USB
 * buffers actually take. */
void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
vfree((void *)dma_handle);
}
 
335,7 → 341,8
__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
}
 
void wait_for_completion(struct completion *x) {
void wait_for_completion(struct completion *x)
{
spin_lock_irq(&x->wait.lock);
if (!x->done) {
DECLARE_WAITQUEUE(wait, current);
358,99 → 365,122
.bus_id = "legacy",
};
 
int register_chrdev(unsigned int a, const char *b, struct file_operations *c) {
/* Stub: character-device registration is not needed under S.Ha.R.K.;
 * always reports success (0) and ignores all arguments. */
int register_chrdev(unsigned int a, const char *b, struct file_operations *c)
{
return 0;
}
 
int unregister_chrdev(unsigned int a, const char *b) {
/* Stub counterpart of register_chrdev(): nothing to unregister, so it
 * always succeeds. */
int unregister_chrdev(unsigned int a, const char *b)
{
return 0;
}
 
void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags) {
/* Identity "mapping": this environment accesses I/O memory at its
 * physical address, so the offset is returned unchanged; size and flags
 * are ignored. */
void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags)
{
return (void *)offset;
}
 
return (void *)offset;
/* Counterpart of __ioremap()'s identity mapping: no mapping was
 * created, so there is nothing to undo. */
void iounmap(void *addr)
{

}
 
void iounmap(void *addr) {
/* Seeking is unsupported on these devices; always reports offset 0.
 * NOTE(review): the mainline kernel's no_llseek returns an error code
 * instead -- confirm callers here don't interpret 0 as a valid seek. */
loff_t no_llseek(struct file *file, loff_t offset, int origin)
{
return 0;
}
 
/*
 * vmalloc() compat shim: "virtually contiguous" allocation collapses to
 * a plain heap allocation in this environment.  Returns NULL when the
 * underlying allocator fails.
 */
void *vmalloc(unsigned long size)
{
	void *area = malloc(size);

	return area;
}
 
loff_t no_llseek(struct file *file, loff_t offset, int origin) {
void *kern_alloc_aligned(size_t size, DWORD flags, int align_bits, DWORD align_ofs);
 
return 0;
void * vmalloc_32(size_t size)
{
void *mem;
unsigned long diff;
 
}
mem = malloc(size+12);
 
void *vmalloc(unsigned long size) {
diff = (unsigned long)((((unsigned long)mem/4)+1)*4-(unsigned long)mem);
 
return malloc(size);
*(unsigned long *)(mem+diff) = (diff | 0x80000000);
 
return (mem+diff+4);
}
 
void * vmalloc_32(size_t size)
void vfree(void *addr)
{
void *mem;
unsigned long diff;
mem = malloc(size+12);
diff = (unsigned long)((((unsigned long)mem/4)+1)*4-(unsigned long)mem);
*(unsigned long *)(mem+diff) = (diff | 0x80000000);
return (mem+diff+4);
if (addr == NULL || *(unsigned long *)(addr-4) == 0)
return;
 
if ((*(unsigned long *)(addr-4) & 0x80000000) == 0x80000000) {
free(addr-(*(unsigned long *)(addr-4) & 0x7FFFFFFF)-4);
*(unsigned long *)(addr-4) = 0;
return;
}
 
free(addr);
 
return;
}
void vfree(void *addr) {
if (addr == NULL || *(unsigned long *)(addr-4) == 0) return;
if ((*(unsigned long *)(addr-4) & 0x80000000) == 0x80000000) {
free(addr-(*(unsigned long *)(addr-4) & 0x7FFFFFFF)-4);
*(unsigned long *)(addr-4) = 0;
return;
}
free(addr);
return;
}
 
/* TODO */
char * strsep(char **a,const char *b) {
void * vmalloc_32_usb(size_t size)
{
void* mem;
unsigned long diff;
 
return NULL;
mem = malloc( size + 2 * 4096 );
 
if (! mem)
return NULL;
 
diff = 4096 - (((unsigned long) mem) % 4096);
*(unsigned long *)(mem+diff) = (diff | 0x80000000);
 
return (void*)(mem + diff + 4096);
}
 
/* TODO */
/* Stub: token splitting is unimplemented.  Always returns NULL ("no
 * more tokens") and never modifies *a, regardless of input. */
char * strsep(char **a,const char *b)
{
return NULL;
}
 
struct screen_info screen_info;
 
int linuxcomp_setfd(struct inode *i, int i_rdev) {
int linuxcomp_setfd(struct inode *i, int i_rdev)
{
i->i_rdev = i_rdev;
 
i->i_rdev = i_rdev;
return 0;
}
 
/* Compat-layer initialisation hook; nothing to set up at present.
 * Always returns 0 (success). */
int linuxcomp_init(void)
{
return 0;

}
 
int linuxcomp_init(void) {
struct page *mem_map = 0x0000;
 
return 0;
/* Stub: deferred-work queueing is unsupported; the work item is neither
 * run nor remembered.
 * NOTE(review): the kernel's schedule_work() returns nonzero when the
 * work was queued; returning 0 here may read as "not queued" to callers
 * that check the result -- verify call sites. */
int schedule_work(struct work_struct *work)
{
return 0;
}
 
/* Stub: per-thread signal enabling is a no-op in this environment;
 * always reports success. */
int allow_signal(int sig)
{
return 0;
}
 
struct page *mem_map = 0x0000;
 
int schedule_work(struct work_struct *work) { return 0;}
void flush_scheduled_work(void) { }
void daemonize(const char *name, ...) { }
int allow_signal(int sig) {return 0; }
void yield(void) { }
 
void do_exit(long code) { }
 
void complete_and_exit(struct completion *comp, long code)
{
if (comp)
464,7 → 494,6
return __ioremap(offset, size, 0);
}
 
 
#define NULL_TIMESPEC(t) ((t)->tv_sec = (t)->tv_nsec = 0)
 
int wait_ms26(unsigned long msec)
480,4 → 509,3
 
return 0;
}
 
/shark/trunk/drivers/pci/pool.c
186,7 → 186,34
return page;
}
 
/*
 * pool_alloc_page_usb - grow a pci_pool by one page of USB-coherent memory.
 *
 * Identical to pool_alloc_page() except that the backing memory comes
 * from pci_alloc_consistent_usb(), i.e. the USB-aligned allocator.
 * The bookkeeping struct and its free-block bitmap (one bit per block,
 * rounded up to whole longs) are allocated together in one kmalloc.
 *
 * Returns the new pci_page linked onto pool->page_list with every block
 * marked free, or NULL if either allocation fails.
 *
 * Fixes vs. previous revision: return NULL rather than 0 for pointers,
 * and drop the unnecessary cast of kmalloc()'s result.
 */
static struct pci_page *
pool_alloc_page_usb (struct pci_pool *pool, int mem_flags)
{
	struct pci_page	*page;
	int		mapsize;

	/* one bit per block, rounded up to a whole number of longs */
	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof (long);

	page = kmalloc (mapsize + sizeof *page, mem_flags);
	if (!page)
		return NULL;
	page->vaddr = pci_alloc_consistent_usb (pool->dev, pool->allocation, &page->dma);
	if (page->vaddr) {
		memset (page->bitmap, 0xff, mapsize);	// bit set == free
#ifdef CONFIG_DEBUG_SLAB
		memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add (&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree (page);
		page = NULL;
	}
	return page;
}
 
static inline int
is_page_busy (int blocks, unsigned long *bitmap)
{
211,7 → 238,6
kfree (page);
}
 
 
/**
* pci_pool_destroy - destroys a pool of pci memory blocks.
* @pool: pci pool that will be destroyed
247,7 → 273,6
kfree (pool);
}
 
 
/**
* pci_pool_alloc - get a block of consistent memory
* @pool: pci pool that will produce the block
319,7 → 344,77
return retval;
}
 
/**
* pci_pool_alloc_usb - get a block of consistent memory
* @pool: pci pool that will produce the block
* @mem_flags: SLAB_KERNEL or SLAB_ATOMIC
* @handle: pointer to dma address of block
*
* This returns the kernel virtual address of a currently unused block,
* and reports its dma address through the handle.
* If such a memory block can't be allocated, null is returned.
*/
/* See kernel-doc above: returns an unused block from the pool (growing
 * it via pool_alloc_page_usb() if needed) and reports its dma address
 * through *handle; returns NULL/0 on failure. */
void *
pci_pool_alloc_usb (struct pci_pool *pool, int mem_flags, dma_addr_t *handle)
{
unsigned long flags;
struct list_head *entry;
struct pci_page *page;
int map, block;
size_t offset;
void *retval;

restart:
spin_lock_irqsave (&pool->lock, flags);
/* First pass: look for a free block in a page the pool already owns. */
list_for_each (entry, &pool->page_list) {
int i;
page = list_entry (entry, struct pci_page, page_list);
/* only cachable accesses here ... */
for (map = 0, i = 0;
i < pool->blocks_per_page;
i += BITS_PER_LONG, map++) {
if (page->bitmap [map] == 0)
continue;
/* ffz(~x) = index of the lowest set (free) bit in x */
block = ffz (~ page->bitmap [map]);
if ((i + block) < pool->blocks_per_page) {
/* claim the block, then compute its byte offset in the page */
clear_bit (block, &page->bitmap [map]);
offset = (BITS_PER_LONG * map) + block;
offset *= pool->size;
goto ready;
}
}
}
/* No free block anywhere: try to grow the pool by one USB page. */
if (!(page = pool_alloc_page_usb (pool, SLAB_ATOMIC))) {
if (mem_flags == SLAB_KERNEL) {
/* Blocking caller: sleep until a block may have been freed,
 * then retake the lock and retry from the top. */
DECLARE_WAITQUEUE (wait, current);

//current->state = TASK_INTERRUPTIBLE;
add_wait_queue (&pool->waitq, &wait);
spin_unlock_irqrestore (&pool->lock, flags);

schedule_timeout (POOL_TIMEOUT_JIFFIES);

remove_wait_queue (&pool->waitq, &wait);
goto restart;
}
/* Atomic caller: report failure. */
retval = 0;
goto done;
}

/* Fresh page: hand out its first block (bit 0). */
clear_bit (0, &page->bitmap [0]);
offset = 0;
ready:
page->in_use++;
retval = offset + page->vaddr;
*handle = offset + page->dma;
#ifdef CONFIG_DEBUG_SLAB
memset (retval, POOL_POISON_ALLOCATED, pool->size);
#endif
done:
spin_unlock_irqrestore (&pool->lock, flags);
return retval;
}
 
static struct pci_page *
pool_find_page (struct pci_pool *pool, dma_addr_t dma)
{
341,7 → 436,6
return page;
}
 
 
/**
* pci_pool_free - put block back into pci pool
* @pool: the pci pool holding the block
399,7 → 493,6
spin_unlock_irqrestore (&pool->lock, flags);
}
 
 
EXPORT_SYMBOL (pci_pool_create);
EXPORT_SYMBOL (pci_pool_destroy);
EXPORT_SYMBOL (pci_pool_alloc);