/shark/trunk/drivers/linuxc26/include/linux/device.h |
---|
1,408 → 1,418 |
/* |
* device.h - generic, centralized driver model |
* |
* Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> |
* |
* This file is released under the GPLv2 |
* |
* See Documentation/driver-model/ for more information. |
*/ |
#ifndef _DEVICE_H_ |
#define _DEVICE_H_ |
#include <linux/config.h> |
#include <linux/ioport.h> |
#include <linux/kobject.h> |
#include <linux/list.h> |
#include <linux/spinlock.h> |
#include <linux/types.h> |
#include <linux/ioport.h> |
#include <linux/module.h> |
#include <linux/pm.h> |
#include <asm/semaphore.h> |
#include <asm/atomic.h> |
#define DEVICE_NAME_SIZE 50 |
#define DEVICE_NAME_HALF __stringify(20) /* Less than half to accommodate slop */ |
#define DEVICE_ID_SIZE 32 |
#define BUS_ID_SIZE KOBJ_NAME_LEN |
/* "level" values passed to the suspend() callbacks of bus_type /
 * device_driver (see the u32 level parameter below). */
enum {
	SUSPEND_NOTIFY,
	SUSPEND_SAVE_STATE,
	SUSPEND_DISABLE,
	SUSPEND_POWER_DOWN,
};

/* "level" values for the matching resume() callbacks. */
enum {
	RESUME_POWER_ON,
	RESUME_RESTORE_STATE,
	RESUME_ENABLE,
};
struct device; |
struct device_driver; |
struct class; |
struct class_device; |
/*
 * struct bus_type - one kind of system bus.  Registered with
 * bus_register(); drivers and devices on the bus are kept in the
 * two ksets below.
 */
struct bus_type {
	char			* name;		/* bus name */
	struct subsystem	subsys;		/* sysfs subsystem for this bus */
	struct kset		drivers;	/* drivers registered on this bus */
	struct kset		devices;	/* devices discovered on this bus */

	/* presumably returns nonzero when drv can handle dev — TODO confirm */
	int		(*match)(struct device * dev, struct device_driver * drv);
	struct device * (*add)	(struct device * parent, char * bus_id);
	/* fills envp/buffer with hotplug environment for dev */
	int		(*hotplug) (struct device *dev, char **envp,
				    int num_envp, char *buffer, int buffer_size);
	int		(*suspend)(struct device * dev, u32 state);
	int		(*resume)(struct device * dev);
};
extern int bus_register(struct bus_type * bus); |
extern void bus_unregister(struct bus_type * bus); |
extern int bus_rescan_devices(struct bus_type * bus); |
extern struct bus_type * get_bus(struct bus_type * bus); |
extern void put_bus(struct bus_type * bus); |
extern struct bus_type * find_bus(char * name); |
/* iterator helpers for buses */ |
int bus_for_each_dev(struct bus_type * bus, struct device * start, void * data, |
int (*fn)(struct device *, void *)); |
int bus_for_each_drv(struct bus_type * bus, struct device_driver * start, |
void * data, int (*fn)(struct device_driver *, void *)); |
/* driverfs interface for exporting bus attributes */ |
/* A sysfs/driverfs attribute attached to a whole bus; created and
 * removed with bus_create_file()/bus_remove_file() below. */
struct bus_attribute {
	struct attribute	attr;	/* name / mode / owner */
	ssize_t (*show)(struct bus_type *, char * buf);
	ssize_t (*store)(struct bus_type *, const char * buf, size_t count);
};
#define BUS_ATTR(_name,_mode,_show,_store) \ |
struct bus_attribute bus_attr_##_name = { \ |
.attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \ |
.show = _show, \ |
.store = _store, \ |
}; |
extern int bus_create_file(struct bus_type *, struct bus_attribute *); |
extern void bus_remove_file(struct bus_type *, struct bus_attribute *); |
/*
 * struct device_driver - a driver that services devices on one bus.
 */
struct device_driver {
	char			* name;		/* driver name */
	struct bus_type		* bus;		/* bus this driver belongs to */

	struct semaphore	unload_sem;	/* presumably serializes module
						   unload — TODO confirm */
	struct kobject		kobj;		/* sysfs object */
	struct list_head	devices;	/* devices bound to this driver */

	int	(*probe)	(struct device * dev);
	int	(*remove)	(struct device * dev);
	void	(*shutdown)	(struct device * dev);
	/* state/level semantics: see the SUSPEND_*/RESUME_* enums above */
	int	(*suspend)	(struct device * dev, u32 state, u32 level);
	int	(*resume)	(struct device * dev, u32 level);
};
extern int driver_register(struct device_driver * drv); |
extern void driver_unregister(struct device_driver * drv); |
extern struct device_driver * get_driver(struct device_driver * drv); |
extern void put_driver(struct device_driver * drv); |
/* driverfs interface for exporting driver attributes */ |
struct driver_attribute { |
struct attribute attr; |
ssize_t (*show)(struct device_driver *, char * buf); |
ssize_t (*store)(struct device_driver *, const char * buf, size_t count); |
}; |
#define DRIVER_ATTR(_name,_mode,_show,_store) \ |
struct driver_attribute driver_attr_##_name = { \ |
.attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \ |
.show = _show, \ |
.store = _store, \ |
}; |
extern int driver_create_file(struct device_driver *, struct driver_attribute *); |
extern void driver_remove_file(struct device_driver *, struct driver_attribute *); |
/* |
* device classes |
*/ |
/* A device class: groups class_devices independently of the bus they
 * live on.  Registered with class_register(). */
struct class {
	char			* name;		/* class name */
	struct subsystem	subsys;		/* sysfs subsystem */
	struct list_head	children;	/* class_devices in this class */
	struct list_head	interfaces;	/* registered class_interfaces */

	int	(*hotplug)(struct class_device *dev, char **envp,
			   int num_envp, char *buffer, int buffer_size);
	void	(*release)(struct class_device *dev);
};
extern int class_register(struct class *); |
extern void class_unregister(struct class *); |
extern struct class * class_get(struct class *); |
extern void class_put(struct class *); |
struct class_attribute { |
struct attribute attr; |
ssize_t (*show)(struct class *, char * buf); |
ssize_t (*store)(struct class *, const char * buf, size_t count); |
}; |
#define CLASS_ATTR(_name,_mode,_show,_store) \ |
struct class_attribute class_attr_##_name = { \ |
.attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \ |
.show = _show, \ |
.store = _store, \ |
}; |
extern int class_create_file(struct class *, const struct class_attribute *); |
extern void class_remove_file(struct class *, const struct class_attribute *); |
/* One device as seen through a class (cf. struct class above). */
struct class_device {
	struct list_head	node;		/* entry in the class lists */
	struct kobject		kobj;		/* sysfs object */
	struct class		* class;	/* required */
	struct device		* dev;		/* not necessary, but nice to have */
	void			* class_data;	/* class-specific data */

	char	class_id[BUS_ID_SIZE];	/* unique to this class */
};
static inline void * |
class_get_devdata (struct class_device *dev) |
{ |
return dev->class_data; |
} |
static inline void |
class_set_devdata (struct class_device *dev, void *data) |
{ |
dev->class_data = data; |
} |
extern int class_device_register(struct class_device *); |
extern void class_device_unregister(struct class_device *); |
extern void class_device_initialize(struct class_device *); |
extern int class_device_add(struct class_device *); |
extern void class_device_del(struct class_device *); |
extern int class_device_rename(struct class_device *, char *); |
extern struct class_device * class_device_get(struct class_device *); |
extern void class_device_put(struct class_device *); |
struct class_device_attribute { |
struct attribute attr; |
ssize_t (*show)(struct class_device *, char * buf); |
ssize_t (*store)(struct class_device *, const char * buf, size_t count); |
}; |
#define CLASS_DEVICE_ATTR(_name,_mode,_show,_store) \ |
struct class_device_attribute class_device_attr_##_name = { \ |
.attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \ |
.show = _show, \ |
.store = _store, \ |
}; |
extern int class_device_create_file(struct class_device *, |
const struct class_device_attribute *); |
extern void class_device_remove_file(struct class_device *, |
const struct class_device_attribute *); |
/* Callback hook for code that wants to see class_devices of one class;
 * registered with class_interface_register() below. */
struct class_interface {
	struct list_head	node;	/* entry in class->interfaces */
	struct class		*class;	/* the class being observed */

	int	(*add)		(struct class_device *);
	void	(*remove)	(struct class_device *);
};
extern int class_interface_register(struct class_interface *); |
extern void class_interface_unregister(struct class_interface *); |
/*
 * struct device - the generic device structure, embedded in bus-specific
 * device types (e.g. struct platform_device below).  Reference-counted
 * via get_device()/put_device().
 */
struct device {
	struct list_head node;		/* node in sibling list */
	struct list_head bus_list;	/* node in bus's list */
	struct list_head driver_list;	/* presumably node in the bound
					   driver's devices list — TODO confirm */
	struct list_head children;	/* list of child devices */
	struct device	* parent;	/* parent in the device tree */

	struct completion * complete;	/* Notification for freeing device. */
	struct kobject kobj;		/* sysfs / refcount object */
	char	bus_id[BUS_ID_SIZE];	/* position on parent bus */

	struct bus_type	* bus;		/* type of bus device is on */
	struct device_driver *driver;	/* which driver has allocated this
					   device */
	void		*driver_data;	/* data private to the driver */
	void		*platform_data;	/* Platform specific data (e.g. ACPI,
					   BIOS data relevant to device) */
	struct dev_pm_info	power;	/* power management bookkeeping */
	u32		power_state;	/* Current operating state. In
					   ACPI-speak, this is D0-D3, D0
					   being fully functional, and D3
					   being off. */

	unsigned char *saved_state;	/* saved device state */
	u32		detach_state;	/* State to enter when device is
					   detached from its driver. */

	u64		*dma_mask;	/* dma mask (if dma'able device) */

	void	(*release)(struct device * dev);
};
/* Map a list_head embedded as the 'node' member back to its
 * containing struct device. */
static inline struct device *
list_to_dev(struct list_head *node)
{
	return list_entry(node, struct device, node);
}
static inline void * |
dev_get_drvdata (struct device *dev) |
{ |
return dev->driver_data; |
} |
static inline void |
dev_set_drvdata (struct device *dev, void *data) |
{ |
dev->driver_data = data; |
} |
/* |
* High level routines for use by the bus drivers |
*/ |
extern int device_register(struct device * dev); |
extern void device_unregister(struct device * dev); |
extern void device_unregister_wait(struct device * dev); |
extern void device_initialize(struct device * dev); |
extern int device_add(struct device * dev); |
extern void device_del(struct device * dev); |
extern int device_for_each_child(struct device *, void *, |
int (*fn)(struct device *, void *)); |
/* |
* Manual binding of a device to driver. See drivers/base/bus.c |
* for information on use. |
*/ |
extern void device_bind_driver(struct device * dev); |
extern void device_release_driver(struct device * dev); |
extern void driver_attach(struct device_driver * drv); |
/* driverfs interface for exporting device attributes */ |
/* A sysfs/driverfs attribute attached to a single device; created and
 * removed with device_create_file()/device_remove_file() below. */
struct device_attribute {
	struct attribute	attr;	/* name / mode / owner */
	ssize_t (*show)(struct device * dev, char * buf);
	ssize_t (*store)(struct device * dev, const char * buf, size_t count);
};
#define DEVICE_ATTR(_name,_mode,_show,_store) \ |
struct device_attribute dev_attr_##_name = { \ |
.attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \ |
.show = _show, \ |
.store = _store, \ |
}; |
extern int device_create_file(struct device *device, struct device_attribute * entry); |
extern void device_remove_file(struct device * dev, struct device_attribute * attr); |
/* |
* Platform "fixup" functions - allow the platform to have their say |
* about devices and actions that the general device layer doesn't |
* know about. |
*/ |
/* Notify platform of device discovery */ |
extern int (*platform_notify)(struct device * dev); |
extern int (*platform_notify_remove)(struct device * dev); |
/** |
* get_device - atomically increment the reference count for the device. |
* |
*/ |
extern struct device * get_device(struct device * dev); |
extern void put_device(struct device * dev); |
/* drivers/base/platform.c */ |
/* A device on the platform bus (drivers/base/platform.c); wraps the
 * generic struct device with its resource table. */
struct platform_device {
	char		* name;		/* device name */
	u32		id;		/* instance id */
	struct device	dev;		/* embedded generic device */
	u32		num_resources;	/* entries in resource[] */
	struct resource	* resource;	/* I/O and memory resources */
};
#define to_platform_device(x) container_of((x), struct platform_device, dev) |
extern int platform_device_register(struct platform_device *); |
extern void platform_device_unregister(struct platform_device *); |
extern struct bus_type platform_bus_type; |
extern struct device legacy_bus; |
/* drivers/base/power.c */ |
extern void device_shutdown(void); |
/* drivers/base/firmware.c */ |
extern int firmware_register(struct subsystem *); |
extern void firmware_unregister(struct subsystem *); |
/* debugging and troubleshooting/diagnostic helpers. */

/* Prefix the message with "<driver name> <bus id>: ".
 * NOTE(review): dereferences (dev)->driver unconditionally — this will
 * fault if the device has no bound driver; confirm all callers invoke
 * it only after binding. */
#define dev_printk(level, dev, format, arg...) \
	printk(level "%s %s: " format , (dev)->driver->name , (dev)->bus_id , ## arg)

#ifdef DEBUG
#define dev_dbg(dev, format, arg...) \
	dev_printk(KERN_DEBUG , dev , format , ## arg)
#else
/* expands to an empty statement so "if (x) dev_dbg(...);" stays legal */
#define dev_dbg(dev, format, arg...) do {} while (0)
#endif

#define dev_err(dev, format, arg...) \
	dev_printk(KERN_ERR , dev , format , ## arg)
#define dev_info(dev, format, arg...) \
	dev_printk(KERN_INFO , dev , format , ## arg)
#define dev_warn(dev, format, arg...) \
	dev_printk(KERN_WARNING , dev , format , ## arg)
/* Create alias, so I can be autoloaded. */ |
#define MODULE_ALIAS_CHARDEV(major,minor) \ |
MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) |
#define MODULE_ALIAS_CHARDEV_MAJOR(major) \ |
MODULE_ALIAS("char-major-" __stringify(major) "-*") |
#endif /* _DEVICE_H_ */ |
/* |
* device.h - generic, centralized driver model |
* |
* Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> |
* |
* This file is released under the GPLv2 |
* |
* See Documentation/driver-model/ for more information. |
*/ |
#ifndef _DEVICE_H_ |
#define _DEVICE_H_ |
#include <linux/config.h> |
#include <linux/ioport.h> |
#include <linux/kobject.h> |
#include <linux/list.h> |
#include <linux/spinlock.h> |
#include <linux/types.h> |
#include <linux/ioport.h> |
#include <linux/module.h> |
#include <linux/pm.h> |
#include <asm/semaphore.h> |
#include <asm/atomic.h> |
#define DEVICE_NAME_SIZE 50 |
#define DEVICE_NAME_HALF __stringify(20) /* Less than half to accommodate slop */ |
#define DEVICE_ID_SIZE 32 |
#define BUS_ID_SIZE KOBJ_NAME_LEN |
enum { |
SUSPEND_NOTIFY, |
SUSPEND_SAVE_STATE, |
SUSPEND_DISABLE, |
SUSPEND_POWER_DOWN, |
}; |
enum { |
RESUME_POWER_ON, |
RESUME_RESTORE_STATE, |
RESUME_ENABLE, |
}; |
struct device; |
struct device_driver; |
struct class; |
struct class_device; |
struct bus_type { |
char * name; |
struct subsystem subsys; |
struct kset drivers; |
struct kset devices; |
int (*match)(struct device * dev, struct device_driver * drv); |
struct device * (*add) (struct device * parent, char * bus_id); |
int (*hotplug) (struct device *dev, char **envp, |
int num_envp, char *buffer, int buffer_size); |
int (*suspend)(struct device * dev, u32 state); |
int (*resume)(struct device * dev); |
}; |
extern int bus_register(struct bus_type * bus); |
extern void bus_unregister(struct bus_type * bus); |
extern int bus_rescan_devices(struct bus_type * bus); |
extern struct bus_type * get_bus(struct bus_type * bus); |
extern void put_bus(struct bus_type * bus); |
extern struct bus_type * find_bus(char * name); |
/* iterator helpers for buses */ |
int bus_for_each_dev(struct bus_type * bus, struct device * start, void * data, |
int (*fn)(struct device *, void *)); |
int bus_for_each_drv(struct bus_type * bus, struct device_driver * start, |
void * data, int (*fn)(struct device_driver *, void *)); |
/* driverfs interface for exporting bus attributes */ |
struct bus_attribute { |
struct attribute attr; |
ssize_t (*show)(struct bus_type *, char * buf); |
ssize_t (*store)(struct bus_type *, const char * buf, size_t count); |
}; |
#define BUS_ATTR(_name,_mode,_show,_store) \ |
struct bus_attribute bus_attr_##_name = { \ |
.attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \ |
.show = _show, \ |
.store = _store, \ |
}; |
extern int bus_create_file(struct bus_type *, struct bus_attribute *); |
extern void bus_remove_file(struct bus_type *, struct bus_attribute *); |
struct device_driver { |
char * name; |
struct bus_type * bus; |
struct semaphore unload_sem; |
struct kobject kobj; |
struct list_head devices; |
int (*probe) (struct device * dev); |
int (*remove) (struct device * dev); |
void (*shutdown) (struct device * dev); |
int (*suspend) (struct device * dev, u32 state, u32 level); |
int (*resume) (struct device * dev, u32 level); |
}; |
extern int driver_register(struct device_driver * drv); |
extern void driver_unregister(struct device_driver * drv); |
extern struct device_driver * get_driver(struct device_driver * drv); |
extern void put_driver(struct device_driver * drv); |
/* driverfs interface for exporting driver attributes */ |
struct driver_attribute { |
struct attribute attr; |
ssize_t (*show)(struct device_driver *, char * buf); |
ssize_t (*store)(struct device_driver *, const char * buf, size_t count); |
}; |
#define DRIVER_ATTR(_name,_mode,_show,_store) \ |
struct driver_attribute driver_attr_##_name = { \ |
.attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \ |
.show = _show, \ |
.store = _store, \ |
}; |
extern int driver_create_file(struct device_driver *, struct driver_attribute *); |
extern void driver_remove_file(struct device_driver *, struct driver_attribute *); |
/* |
* device classes |
*/ |
struct class { |
char * name; |
struct subsystem subsys; |
struct list_head children; |
struct list_head interfaces; |
int (*hotplug)(struct class_device *dev, char **envp, |
int num_envp, char *buffer, int buffer_size); |
void (*release)(struct class_device *dev); |
}; |
extern int class_register(struct class *); |
extern void class_unregister(struct class *); |
extern struct class * class_get(struct class *); |
extern void class_put(struct class *); |
struct class_attribute { |
struct attribute attr; |
ssize_t (*show)(struct class *, char * buf); |
ssize_t (*store)(struct class *, const char * buf, size_t count); |
}; |
#define CLASS_ATTR(_name,_mode,_show,_store) \ |
struct class_attribute class_attr_##_name = { \ |
.attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \ |
.show = _show, \ |
.store = _store, \ |
}; |
extern int class_create_file(struct class *, const struct class_attribute *); |
extern void class_remove_file(struct class *, const struct class_attribute *); |
struct class_device { |
struct list_head node; |
struct kobject kobj; |
struct class * class; /* required */ |
struct device * dev; /* not necessary, but nice to have */ |
void * class_data; /* class-specific data */ |
char class_id[BUS_ID_SIZE]; /* unique to this class */ |
}; |
static inline void * |
class_get_devdata (struct class_device *dev) |
{ |
return dev->class_data; |
} |
static inline void |
class_set_devdata (struct class_device *dev, void *data) |
{ |
dev->class_data = data; |
} |
extern int class_device_register(struct class_device *); |
extern void class_device_unregister(struct class_device *); |
extern void class_device_initialize(struct class_device *); |
extern int class_device_add(struct class_device *); |
extern void class_device_del(struct class_device *); |
extern int class_device_rename(struct class_device *, char *); |
extern struct class_device * class_device_get(struct class_device *); |
extern void class_device_put(struct class_device *); |
struct class_device_attribute { |
struct attribute attr; |
ssize_t (*show)(struct class_device *, char * buf); |
ssize_t (*store)(struct class_device *, const char * buf, size_t count); |
}; |
#define CLASS_DEVICE_ATTR(_name,_mode,_show,_store) \ |
struct class_device_attribute class_device_attr_##_name = { \ |
.attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \ |
.show = _show, \ |
.store = _store, \ |
}; |
extern int class_device_create_file(struct class_device *, |
const struct class_device_attribute *); |
extern void class_device_remove_file(struct class_device *, |
const struct class_device_attribute *); |
struct class_interface { |
struct list_head node; |
struct class *class; |
int (*add) (struct class_device *); |
void (*remove) (struct class_device *); |
}; |
extern int class_interface_register(struct class_interface *); |
extern void class_interface_unregister(struct class_interface *); |
struct device { |
struct list_head node; /* node in sibling list */ |
struct list_head bus_list; /* node in bus's list */ |
struct list_head driver_list; |
struct list_head children; |
struct device * parent; |
struct completion * complete; /* Notification for freeing device. */ |
struct kobject kobj; |
char bus_id[BUS_ID_SIZE]; /* position on parent bus */ |
struct bus_type * bus; /* type of bus device is on */ |
struct device_driver *driver; /* which driver has allocated this |
device */ |
void *driver_data; /* data private to the driver */ |
void *platform_data; /* Platform specific data (e.g. ACPI, |
BIOS data relevant to device) */ |
struct dev_pm_info power; |
u32 power_state; /* Current operating state. In |
ACPI-speak, this is D0-D3, D0 |
being fully functional, and D3 |
being off. */ |
unsigned char *saved_state; /* saved device state */ |
u32 detach_state; /* State to enter when device is |
detached from its driver. */ |
u64 *dma_mask; /* dma mask (if dma'able device) */ |
void (*release)(struct device * dev); |
}; |
/* Recover the struct device that embeds @node as its 'node' member. */
static inline struct device *
list_to_dev(struct list_head *node)
{
	return list_entry(node, struct device, node);
}
static inline void * |
dev_get_drvdata (struct device *dev) |
{ |
return dev->driver_data; |
} |
static inline void |
dev_set_drvdata (struct device *dev, void *data) |
{ |
dev->driver_data = data; |
} |
/* |
* High level routines for use by the bus drivers |
*/ |
extern int device_register(struct device * dev); |
extern void device_unregister(struct device * dev); |
extern void device_unregister_wait(struct device * dev); |
extern void device_initialize(struct device * dev); |
extern int device_add(struct device * dev); |
extern void device_del(struct device * dev); |
extern int device_for_each_child(struct device *, void *, |
int (*fn)(struct device *, void *)); |
/* |
* Manual binding of a device to driver. See drivers/base/bus.c |
* for information on use. |
*/ |
extern void device_bind_driver(struct device * dev); |
extern void device_release_driver(struct device * dev); |
extern void driver_attach(struct device_driver * drv); |
/* driverfs interface for exporting device attributes */ |
struct device_attribute { |
struct attribute attr; |
ssize_t (*show)(struct device * dev, char * buf); |
ssize_t (*store)(struct device * dev, const char * buf, size_t count); |
}; |
#define DEVICE_ATTR(_name,_mode,_show,_store) \ |
struct device_attribute dev_attr_##_name = { \ |
.attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \ |
.show = _show, \ |
.store = _store, \ |
}; |
extern int device_create_file(struct device *device, struct device_attribute * entry); |
extern void device_remove_file(struct device * dev, struct device_attribute * attr); |
/* |
* Platform "fixup" functions - allow the platform to have their say |
* about devices and actions that the general device layer doesn't |
* know about. |
*/ |
/* Notify platform of device discovery */ |
extern int (*platform_notify)(struct device * dev); |
extern int (*platform_notify_remove)(struct device * dev); |
/** |
* get_device - atomically increment the reference count for the device. |
* |
*/ |
extern struct device * get_device(struct device * dev); |
extern void put_device(struct device * dev); |
/* drivers/base/platform.c */ |
struct platform_device { |
char * name; |
u32 id; |
struct device dev; |
u32 num_resources; |
struct resource * resource; |
}; |
#define to_platform_device(x) container_of((x), struct platform_device, dev) |
extern int platform_device_register(struct platform_device *); |
extern void platform_device_unregister(struct platform_device *); |
extern struct bus_type platform_bus_type; |
extern struct device legacy_bus; |
/* drivers/base/power.c */ |
extern void device_shutdown(void); |
/* drivers/base/firmware.c */ |
extern int firmware_register(struct subsystem *); |
extern void firmware_unregister(struct subsystem *); |
/* debugging and troubleshooting/diagnostic helpers. */

/* Prefix the message with "<driver name> <bus id>: ".
 * NOTE(review): (dev)->driver is dereferenced unconditionally, so this
 * oopses on an unbound device — verify call sites. */
#define dev_printk(level, dev, format, arg...) \
	printk(level "%s %s: " format , (dev)->driver->name , (dev)->bus_id , ## arg)

#ifdef DEBUG
#define dev_dbg(dev, format, arg...) \
	dev_printk(KERN_DEBUG , dev , format , ## arg)
#else
/* empty statement, so "if (x) dev_dbg(...);" stays syntactically valid */
#define dev_dbg(dev, format, arg...) do {} while (0)
#endif
#ifdef DEBUG //** for shark usb
#define dev_err(dev, format, arg...) \
	dev_printk(KERN_ERR , dev , format , ## arg)
#define dev_info(dev, format, arg...) \
	dev_printk(KERN_INFO , dev , format , ## arg)
#define dev_warn(dev, format, arg...) \
	dev_printk(KERN_WARNING , dev , format , ## arg)
#else
/* Compiled out when !DEBUG (SHaRK USB port).  Expand to a proper empty
 * statement — matching dev_dbg() above — instead of nothing, so uses
 * such as "cond ? dev_err(...) : 0" or a bare "dev_err(...)" inside an
 * unbraced if/else remain well-formed. */
#define dev_err(dev, format, arg...) do {} while (0)
#define dev_info(dev, format, arg...) do {} while (0)
#define dev_warn(dev, format, arg...) do {} while (0)
#endif
/* Create alias, so I can be autoloaded. */ |
#define MODULE_ALIAS_CHARDEV(major,minor) \ |
MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) |
#define MODULE_ALIAS_CHARDEV_MAJOR(major) \ |
MODULE_ALIAS("char-major-" __stringify(major) "-*") |
#endif /* _DEVICE_H_ */ |
/shark/trunk/drivers/linuxc26/include/linux/kernel.h |
---|
57,10 → 57,8 |
extern struct notifier_block *panic_notifier_list; |
void panic(const char * fmt, ...) |
__attribute__ ((format (printf, 1, 2))); |
asmlinkage NORET_TYPE void do_exit(long error_code) |
ATTRIB_NORET; |
NORET_TYPE void complete_and_exit(struct completion *, long) |
ATTRIB_NORET; |
void do_exit(long error_code); |
void complete_and_exit(struct completion *, long); |
extern int abs(int); |
extern unsigned long simple_strtoul(const char *,char **,unsigned int); |
extern long simple_strtol(const char *,char **,unsigned int); |
/shark/trunk/drivers/linuxc26/include/linux/usb.h |
---|
1,1048 → 1,1055 |
#ifndef __LINUX_USB_H |
#define __LINUX_USB_H |
#include <linux/mod_devicetable.h> |
#include <linux/usb_ch9.h> |
#define USB_MAJOR 180 |
#ifdef __KERNEL__ |
#include <linux/config.h> |
#include <linux/errno.h> /* for -ENODEV */ |
#include <linux/delay.h> /* for mdelay() */ |
#include <linux/interrupt.h> /* for in_interrupt() */ |
#include <linux/list.h> /* for struct list_head */ |
#include <linux/device.h> /* for struct device */ |
#include <linux/fs.h> /* for struct file_operations */ |
#include <linux/completion.h> /* for struct completion */ |
#include <linux/sched.h> /* for current && schedule_timeout */ |
/*
 * wait_ms - delay for roughly @ms milliseconds.
 *
 * In process context this sleeps uninterruptibly via schedule_timeout();
 * the "+1" jiffy guarantees a nonzero delay even when ms*HZ/1000
 * truncates to zero.  In interrupt context it busy-waits with mdelay().
 * NOTE(review): mainline uses set_current_state() rather than writing
 * current->state directly (the helper adds a memory barrier on SMP) —
 * confirm whether that matters for this port.
 */
static __inline__ void wait_ms(unsigned int ms)
{
	if(!in_interrupt()) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(1 + ms * HZ / 1000);
	}
	else
		mdelay(ms);
}
struct usb_device; |
/*-------------------------------------------------------------------------*/ |
/* |
* Host-side wrappers for standard USB descriptors ... these are parsed |
* from the data provided by devices. Parsing turns them from a flat |
* sequence of descriptors into a hierarchy: |
* |
* - devices have one (usually) or more configs; |
* - configs have one (often) or more interfaces; |
* - interfaces have one (usually) or more settings; |
* - each interface setting has zero or (usually) more endpoints. |
* |
* And there might be other descriptors mixed in with those. |
* |
* Devices may also have class-specific or vendor-specific descriptors. |
*/ |
/* host-side wrapper for parsed endpoint descriptors */
struct usb_host_endpoint {
	struct usb_endpoint_descriptor	desc;	/* the parsed descriptor */

	unsigned char *extra;   /* Extra descriptors */
	int extralen;		/* byte length of the extra blob */
};
/* host-side wrapper for one interface setting's parsed descriptors */
struct usb_host_interface {
	struct usb_interface_descriptor	desc;	/* the parsed descriptor */

	/* array of desc.bNumEndpoint endpoints associated with this
	 * interface setting.  these will be in no particular order.
	 */
	struct usb_host_endpoint *endpoint;

	unsigned char *extra;   /* Extra descriptors */
	int extralen;		/* byte length of the extra blob */
};
/** |
* struct usb_interface - what usb device drivers talk to |
* @altsetting: array of interface descriptors, one for each alternate |
* setting that may be selected. Each one includes a set of |
* endpoint configurations and will be in numberic order, |
* 0..num_altsetting. |
* @num_altsetting: number of altsettings defined. |
* @act_altsetting: index of current altsetting. this number is always |
* less than num_altsetting. after the device is configured, each |
* interface uses its default setting of zero. |
* @driver: the USB driver that is bound to this interface. |
* @minor: the minor number assigned to this interface, if this |
* interface is bound to a driver that uses the USB major number. |
* If this interface does not use the USB major, this field should |
* be unused. The driver should set this value in the probe() |
* function of the driver, after it has been assigned a minor |
* number from the USB core by calling usb_register_dev(). |
* @dev: driver model's view of this device |
* @class_dev: driver model's class view of this device. |
* |
* USB device drivers attach to interfaces on a physical device. Each |
* interface encapsulates a single high level function, such as feeding |
* an audio stream to a speaker or reporting a change in a volume control. |
* Many USB devices only have one interface. The protocol used to talk to |
* an interface's endpoints can be defined in a usb "class" specification, |
* or by a product's vendor. The (default) control endpoint is part of |
* every interface, but is never listed among the interface's descriptors. |
* |
* The driver that is bound to the interface can use standard driver model |
* calls such as dev_get_drvdata() on the dev member of this structure. |
* |
* Each interface may have alternate settings. The initial configuration |
* of a device sets the first of these, but the device driver can change |
* that setting using usb_set_interface(). Alternate settings are often |
* used to control the the use of periodic endpoints, such as by having |
* different endpoints use different amounts of reserved USB bandwidth. |
* All standards-conformant USB devices that use isochronous endpoints |
* will use them in non-default settings. |
*/ |
/* See the kerneldoc comment above for full field documentation. */
struct usb_interface {
	/* array of alternate settings for this interface.
	 * these will be in numeric order, 0..num_altsettting
	 */
	struct usb_host_interface *altsetting;

	unsigned act_altsetting;	/* active alternate setting */
	unsigned num_altsetting;	/* number of alternate settings */

	struct usb_driver *driver;	/* driver bound to this interface */
	int minor;			/* minor number this interface is bound to */
	struct device dev;		/* interface specific device info */
	struct class_device *class_dev;	/* driver model's class view */
};
#define to_usb_interface(d) container_of(d, struct usb_interface, dev) |
#define interface_to_usbdev(intf) \ |
container_of(intf->dev.parent, struct usb_device, dev) |
static inline void *usb_get_intfdata (struct usb_interface *intf) |
{ |
return dev_get_drvdata (&intf->dev); |
} |
static inline void usb_set_intfdata (struct usb_interface *intf, void *data) |
{ |
dev_set_drvdata(&intf->dev, data); |
} |
/* this maximum is arbitrary */ |
#define USB_MAXINTERFACES 32 |
/* USB_DT_CONFIG: Configuration descriptor information. |
* |
* USB_DT_OTHER_SPEED_CONFIG is the same descriptor, except that the |
* descriptor type is different. Highspeed-capable devices can look |
* different depending on what speed they're currently running. Only |
* devices with a USB_DT_DEVICE_QUALIFIER have an OTHER_SPEED_CONFIG. |
*/ |
/* One parsed configuration (see the USB_DT_CONFIG comment above). */
struct usb_host_config {
	struct usb_config_descriptor	desc;	/* the parsed descriptor */

	/* the interfaces associated with this configuration
	 * these will be in numeric order, 0..desc.bNumInterfaces
	 */
	struct usb_interface *interface[USB_MAXINTERFACES];

	unsigned char *extra;   /* Extra descriptors */
	int extralen;		/* byte length of the extra blob */
};
// FIXME remove; exported only for drivers/usb/misc/auserwald.c |
// prefer usb_device->epnum[0..31] |
extern struct usb_endpoint_descriptor * |
usb_epnum_to_ep_desc(struct usb_device *dev, unsigned epnum); |
int __usb_get_extra_descriptor(char *buffer, unsigned size, |
unsigned char type, void **ptr); |
#define usb_get_extra_descriptor(ifpoint,type,ptr)\ |
__usb_get_extra_descriptor((ifpoint)->extra,(ifpoint)->extralen,\ |
type,(void**)ptr) |
/* -------------------------------------------------------------------------- */ |
struct usb_operations; |
/* USB device number allocation bitmap: one bit per possible device
 * address (128 total), packed into unsigned longs. */
struct usb_devmap {
	unsigned long devicemap[128 / (8*sizeof(unsigned long))];
};
/*
 * Allocated per bus (tree of devices) we have:
 */
struct usb_bus {
	struct device *controller;	/* host/master side hardware */
	int busnum;			/* Bus number (in order of reg) */
	char *bus_name;			/* stable id (PCI slot_name etc) */

	int devnum_next;		/* Next open device number in round-robin allocation */

	struct usb_devmap devmap;	/* device address allocation map */
	struct usb_operations *op;	/* Operations (specific to the HC) */
	struct usb_device *root_hub;	/* Root hub */
	struct list_head bus_list;	/* list of busses */
	void *hcpriv;			/* Host Controller private data */

	int bandwidth_allocated;	/* on this bus: how much of the time
					 * reserved for periodic (intr/iso)
					 * requests is used, on average?
					 * Units: microseconds/frame.
					 * Limits: Full/low speed reserve 90%,
					 * while high speed reserves 80%.
					 */
	int bandwidth_int_reqs;		/* number of Interrupt requests */
	int bandwidth_isoc_reqs;	/* number of Isoc. requests */

	struct dentry *usbfs_dentry;	/* usbfs dentry entry for the bus */
	struct dentry *usbdevfs_dentry;	/* usbdevfs dentry entry for the bus */

	struct class_device class_dev;	/* class device for this bus */
	void (*release)(struct usb_bus *bus);	/* function to destroy this bus's memory */
};
#define to_usb_bus(d) container_of(d, struct usb_bus, class_dev) |
/* -------------------------------------------------------------------------- */ |
/* This is arbitrary. |
* From USB 2.0 spec Table 11-13, offset 7, a hub can |
* have up to 255 ports. The most yet reported is 10. |
*/ |
#define USB_MAXCHILDREN (16) |
struct usb_tt; |
struct usb_device {
	int devnum;		/* Address on USB bus */
	char devpath [16];	/* Use in messages: /port/port/... */
	enum usb_device_state state;	/* configured, not attached, etc */
	enum usb_device_speed speed;	/* high/full/low (or error) */
	struct usb_tt *tt;	/* low/full speed dev, highspeed hub */
	int ttport;		/* device port on that tt hub */
	/* NOTE(review): presumably held by usbcore to serialize
	 * device-wide operations -- confirm against usbcore */
	struct semaphore serialize;
	unsigned int toggle[2];	/* one bit for each endpoint ([0] = IN, [1] = OUT) */
	unsigned int halted[2];	/* endpoint halts; one bit per endpoint # & direction; */
				/* [0] = IN, [1] = OUT */
	int epmaxpacketin[16];	/* INput endpoint specific maximums */
	int epmaxpacketout[16];	/* OUTput endpoint specific maximums */
	struct usb_device *parent;	/* our hub, unless we're the root */
	struct usb_bus *bus;	/* Bus we're part of */
	struct device dev;	/* Generic device interface */
	struct usb_device_descriptor descriptor;/* Descriptor */
	struct usb_host_config *config;	/* All of the configs */
	struct usb_host_config *actconfig;/* the active configuration */
	char **rawdescriptors;	/* Raw descriptors for each config */
	int have_langid;	/* whether string_langid is valid yet */
	int string_langid;	/* language ID for strings */
	void *hcpriv;		/* Host Controller private data */
	/* NOTE(review): presumably the usbfs files open on this
	 * device -- confirm against the usbfs code */
	struct list_head filelist;
	struct dentry *usbfs_dentry;	/* usbfs dentry entry for the device */
	struct dentry *usbdevfs_dentry;	/* usbdevfs dentry entry for the device */
	/*
	 * Child devices - these can be either new devices
	 * (if this is a hub device), or different instances
	 * of this same device.
	 *
	 * Each instance needs its own set of data structures.
	 */
	int maxchild;		/* Number of ports if hub */
	struct usb_device *children[USB_MAXCHILDREN];
};
/* map a struct device pointer back to its containing usb_device */
#define to_usb_device(d) container_of(d, struct usb_device, dev)
/* device lifecycle: allocation plus get/put pairs (the get/put naming
 * suggests reference counting -- see usbcore for the exact contract) */
extern struct usb_device *usb_alloc_dev(struct usb_device *parent, struct usb_bus *);
extern struct usb_device *usb_get_dev(struct usb_device *dev);
extern void usb_put_dev(struct usb_device *dev);
/* mostly for devices emulating SCSI over USB */
extern int usb_reset_device(struct usb_device *dev);
extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id);
/* for drivers using iso endpoints */
extern int usb_get_current_frame_number (struct usb_device *usb_dev);
/* used these for multi-interface device registration */
extern int usb_driver_claim_interface(struct usb_driver *driver,
			struct usb_interface *iface, void* priv);
extern int usb_interface_claimed(struct usb_interface *iface);
extern void usb_driver_release_interface(struct usb_driver *driver,
			struct usb_interface *iface);
/* match an interface against one entry of an id table (hotplug support) */
const struct usb_device_id *usb_match_id(struct usb_interface *interface,
					 const struct usb_device_id *id);
extern struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor);
extern struct usb_interface *usb_ifnum_to_if(struct usb_device *dev, unsigned ifnum);
/** |
* usb_make_path - returns stable device path in the usb tree |
* @dev: the device whose path is being constructed |
* @buf: where to put the string |
* @size: how big is "buf"? |
* |
* Returns length of the string (> 0) or negative if size was too small. |
* |
* This identifier is intended to be "stable", reflecting physical paths in |
* hardware such as physical bus addresses for host controllers or ports on |
* USB hubs. That makes it stay the same until systems are physically |
* reconfigured, by re-cabling a tree of USB devices or by moving USB host |
* controllers. Adding and removing devices, including virtual root hubs |
 * in host controller driver modules, does not change these path identifiers;
* neither does rebooting or re-enumerating. These are more useful identifiers |
* than changeable ("unstable") ones like bus numbers or device addresses. |
* |
* With a partial exception for devices connected to USB 2.0 root hubs, these |
* identifiers are also predictable. So long as the device tree isn't changed, |
* plugging any USB device into a given hub port always gives it the same path. |
* Because of the use of "companion" controllers, devices connected to ports on |
* USB 2.0 root hubs (EHCI host controllers) will get one path ID if they are |
* high speed, and a different one if they are full or low speed. |
*/ |
static inline int usb_make_path (struct usb_device *dev, char *buf, size_t size) |
{ |
int actual; |
actual = snprintf (buf, size, "usb-%s-%s", dev->bus->bus_name, dev->devpath); |
return (actual >= (int)size) ? -1 : actual; |
} |
/*-------------------------------------------------------------------------*/ |
/* Convenience combinations of the USB_DEVICE_ID_MATCH_* bits, used by
 * the USB_DEVICE*() id-table initializer macros below. */
#define USB_DEVICE_ID_MATCH_DEVICE (USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT)
#define USB_DEVICE_ID_MATCH_DEV_RANGE (USB_DEVICE_ID_MATCH_DEV_LO | USB_DEVICE_ID_MATCH_DEV_HI)
#define USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_RANGE)
#define USB_DEVICE_ID_MATCH_DEV_INFO \
	(USB_DEVICE_ID_MATCH_DEV_CLASS | USB_DEVICE_ID_MATCH_DEV_SUBCLASS | USB_DEVICE_ID_MATCH_DEV_PROTOCOL)
#define USB_DEVICE_ID_MATCH_INT_INFO \
	(USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS | USB_DEVICE_ID_MATCH_INT_PROTOCOL)
/**
 * USB_DEVICE - macro used to describe a specific usb device
 * @vend: the 16 bit USB Vendor ID
 * @prod: the 16 bit USB Product ID
 *
 * This macro is used to initialize a struct usb_device_id that matches a
 * specific device.
 */
#define USB_DEVICE(vend,prod) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = (vend), .idProduct = (prod)
/**
 * USB_DEVICE_VER - macro used to describe a specific usb device with a version range
 * @vend: the 16 bit USB Vendor ID
 * @prod: the 16 bit USB Product ID
 * @lo: the bcdDevice_lo value
 * @hi: the bcdDevice_hi value
 *
 * This macro is used to initialize a struct usb_device_id that matches a
 * specific device, with a version range.
 */
#define USB_DEVICE_VER(vend,prod,lo,hi) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, .idVendor = (vend), .idProduct = (prod), .bcdDevice_lo = (lo), .bcdDevice_hi = (hi)
/**
 * USB_DEVICE_INFO - macro used to describe a class of usb devices
 * @cl: bDeviceClass value
 * @sc: bDeviceSubClass value
 * @pr: bDeviceProtocol value
 *
 * This macro is used to initialize a struct usb_device_id that matches a
 * specific class of devices.
 */
#define USB_DEVICE_INFO(cl,sc,pr) \
	.match_flags = USB_DEVICE_ID_MATCH_DEV_INFO, .bDeviceClass = (cl), .bDeviceSubClass = (sc), .bDeviceProtocol = (pr)
/**
 * USB_INTERFACE_INFO - macro used to describe a class of usb interfaces
 * @cl: bInterfaceClass value
 * @sc: bInterfaceSubClass value
 * @pr: bInterfaceProtocol value
 *
 * This macro is used to initialize a struct usb_device_id that matches a
 * specific class of interfaces.
 */
#define USB_INTERFACE_INFO(cl,sc,pr) \
	.match_flags = USB_DEVICE_ID_MATCH_INT_INFO, .bInterfaceClass = (cl), .bInterfaceSubClass = (sc), .bInterfaceProtocol = (pr)
/* -------------------------------------------------------------------------- */ |
/** |
* struct usb_driver - identifies USB driver to usbcore |
* @owner: Pointer to the module owner of this driver; initialize |
* it using THIS_MODULE. |
* @name: The driver name should be unique among USB drivers, |
* and should normally be the same as the module name. |
* @probe: Called to see if the driver is willing to manage a particular |
* interface on a device. If it is, probe returns zero and uses |
* dev_set_drvdata() to associate driver-specific data with the |
* interface. It may also use usb_set_interface() to specify the |
* appropriate altsetting. If unwilling to manage the interface, |
* return a negative errno value. |
* @disconnect: Called when the interface is no longer accessible, usually |
* because its device has been (or is being) disconnected or the |
* driver module is being unloaded. |
* @ioctl: Used for drivers that want to talk to userspace through |
* the "usbfs" filesystem. This lets devices provide ways to |
* expose information to user space regardless of where they |
* do (or don't) show up otherwise in the filesystem. |
* @suspend: Called when the device is going to be suspended by the system. |
* @resume: Called when the device is being resumed by the system. |
* @id_table: USB drivers use ID table to support hotplugging. |
* Export this with MODULE_DEVICE_TABLE(usb,...). This must be set |
* or your driver's probe function will never get called. |
* @driver: the driver model core driver structure. |
* @serialize: a semaphore used to serialize access to this driver. Used |
* in the probe and disconnect functions. Only the USB core should use |
* this lock. |
* |
* USB drivers must provide a name, probe() and disconnect() methods, |
* and an id_table. Other driver fields are optional. |
* |
* The id_table is used in hotplugging. It holds a set of descriptors, |
* and specialized data may be associated with each entry. That table |
* is used by both user and kernel mode hotplugging support. |
* |
* The probe() and disconnect() methods are called in a context where |
* they can sleep, but they should avoid abusing the privilege. Most |
* work to connect to a device should be done when the device is opened, |
* and undone at the last close. The disconnect code needs to address |
* concurrency issues with respect to open() and close() methods, as |
* well as forcing all pending I/O requests to complete (by unlinking |
* them as necessary, and blocking until the unlinks complete). |
*/ |
/* Field-by-field semantics are documented in the kerneldoc block above. */
struct usb_driver {
	struct module *owner;		/* initialize with THIS_MODULE */
	const char *name;		/* unique among USB drivers */
	int (*probe) (struct usb_interface *intf,
		      const struct usb_device_id *id);
	void (*disconnect) (struct usb_interface *intf);
	int (*ioctl) (struct usb_interface *intf, unsigned int code, void *buf);
	int (*suspend) (struct usb_interface *intf, u32 state);
	int (*resume) (struct usb_interface *intf);
	const struct usb_device_id *id_table;	/* hotplug match table; required */
	struct device_driver driver;	/* driver-model core structure */
	struct semaphore serialize;	/* usbcore-only probe/disconnect lock */
};
/* map a struct device_driver pointer back to its containing usb_driver */
#define to_usb_driver(d) container_of(d, struct usb_driver, driver)
extern struct bus_type usb_bus_type;
/**
 * struct usb_class_driver - identifies a USB driver that wants to use the USB major number
 * @name: devfs name for this driver. Will also be used by the driver
 *	class code to create a usb class device.
 * @fops: pointer to the struct file_operations of this driver.
 * @mode: the mode for the devfs file to be created for this driver.
 * @minor_base: the start of the minor range for this driver.
 *
 * This structure is used for the usb_register_dev() and
 * usb_unregister_dev() functions, to consolidate a number of the
 * parameters used for them.
 */
struct usb_class_driver {
	char *name;
	struct file_operations *fops;
	mode_t mode;
	int minor_base;
};
/*
 * use these in module_init()/module_exit()
 * and don't forget MODULE_DEVICE_TABLE(usb, ...)
 */
extern int usb_register(struct usb_driver *);
extern void usb_deregister(struct usb_driver *);
/* claim/release a USB-major minor number (and devfs entry) for an
 * interface -- see struct usb_class_driver above */
extern int usb_register_dev(struct usb_interface *intf,
	struct usb_class_driver *class_driver);
extern void usb_deregister_dev(struct usb_interface *intf,
	struct usb_class_driver *class_driver);
/* NOTE(review): presumably nonzero when USB support is globally
 * disabled -- confirm the exact semantics in usbcore */
extern int usb_disabled(void);
/* -------------------------------------------------------------------------- */
/*
 * URB support, for asynchronous request completions
 */
/*
 * urb->transfer_flags:
 */
#define URB_SHORT_NOT_OK 0x0001 /* report short reads as errors */
#define URB_ISO_ASAP 0x0002 /* iso-only, urb->start_frame ignored */
#define URB_NO_TRANSFER_DMA_MAP 0x0004 /* urb->transfer_dma valid on submit */
#define URB_NO_SETUP_DMA_MAP 0x0008 /* urb->setup_dma valid on submit */
#define URB_ASYNC_UNLINK 0x0010 /* usb_unlink_urb() returns asap */
#define URB_NO_FSBR 0x0020 /* UHCI-specific */
#define URB_ZERO_PACKET 0x0040 /* Finish bulk OUTs with short packet */
#define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt needed */
/* Describes one packet of an isochronous transfer; an array of these
 * sits at the end of struct urb (iso_frame_desc[]). */
struct usb_iso_packet_descriptor {
	unsigned int offset;		/* presumably an offset into the
					 * urb's transfer buffer -- confirm */
	unsigned int length;		/* expected length */
	unsigned int actual_length;	/* bytes actually transferred */
	unsigned int status;		/* per-packet completion status */
};
struct urb; |
struct pt_regs; |
typedef void (*usb_complete_t)(struct urb *, struct pt_regs *); |
/** |
* struct urb - USB Request Block |
* @urb_list: For use by current owner of the URB. |
* @pipe: Holds endpoint number, direction, type, and more. |
* Create these values with the eight macros available; |
* usb_{snd,rcv}TYPEpipe(dev,endpoint), where the type is "ctrl" |
* (control), "bulk", "int" (interrupt), or "iso" (isochronous). |
* For example usb_sndbulkpipe() or usb_rcvintpipe(). Endpoint |
* numbers range from zero to fifteen. Note that "in" endpoint two |
* is a different endpoint (and pipe) from "out" endpoint two. |
* The current configuration controls the existence, type, and |
* maximum packet size of any given endpoint. |
* @dev: Identifies the USB device to perform the request. |
* @status: This is read in non-iso completion functions to get the |
* status of the particular request. ISO requests only use it |
* to tell whether the URB was unlinked; detailed status for |
* each frame is in the fields of the iso_frame-desc. |
* @transfer_flags: A variety of flags may be used to affect how URB |
* submission, unlinking, or operation are handled. Different |
* kinds of URB can use different flags. |
* @transfer_buffer: This identifies the buffer to (or from) which |
* the I/O request will be performed (unless URB_NO_TRANSFER_DMA_MAP |
* is set). This buffer must be suitable for DMA; allocate it with |
* kmalloc() or equivalent. For transfers to "in" endpoints, contents |
* of this buffer will be modified. This buffer is used for data |
* phases of control transfers. |
* @transfer_dma: When transfer_flags includes URB_NO_TRANSFER_DMA_MAP, |
* the device driver is saying that it provided this DMA address, |
* which the host controller driver should use in preference to the |
* transfer_buffer. |
* @transfer_buffer_length: How big is transfer_buffer. The transfer may |
* be broken up into chunks according to the current maximum packet |
* size for the endpoint, which is a function of the configuration |
* and is encoded in the pipe. When the length is zero, neither |
* transfer_buffer nor transfer_dma is used. |
* @actual_length: This is read in non-iso completion functions, and |
* it tells how many bytes (out of transfer_buffer_length) were |
* transferred. It will normally be the same as requested, unless |
* either an error was reported or a short read was performed. |
* The URB_SHORT_NOT_OK transfer flag may be used to make such |
* short reads be reported as errors. |
* @setup_packet: Only used for control transfers, this points to eight bytes |
* of setup data. Control transfers always start by sending this data |
* to the device. Then transfer_buffer is read or written, if needed. |
* @setup_dma: For control transfers with URB_NO_SETUP_DMA_MAP set, the |
* device driver has provided this DMA address for the setup packet. |
* The host controller driver should use this in preference to |
* setup_packet. |
* @start_frame: Returns the initial frame for interrupt or isochronous |
* transfers. |
* @number_of_packets: Lists the number of ISO transfer buffers. |
* @interval: Specifies the polling interval for interrupt or isochronous |
 *	transfers. The units are frames (milliseconds) for full and low
* speed devices, and microframes (1/8 millisecond) for highspeed ones. |
* @error_count: Returns the number of ISO transfers that reported errors. |
* @context: For use in completion functions. This normally points to |
* request-specific driver context. |
* @complete: Completion handler. This URB is passed as the parameter to the |
* completion function. The completion function may then do what |
* it likes with the URB, including resubmitting or freeing it. |
* @iso_frame_desc: Used to provide arrays of ISO transfer buffers and to |
* collect the transfer status for each buffer. |
* @timeout: If set to zero, the urb will never timeout. Otherwise this is |
* the time in jiffies that this urb will timeout in. |
* |
* This structure identifies USB transfer requests. URBs must be allocated by |
* calling usb_alloc_urb() and freed with a call to usb_free_urb(). |
* Initialization may be done using various usb_fill_*_urb() functions. URBs |
* are submitted using usb_submit_urb(), and pending requests may be canceled |
* using usb_unlink_urb(). |
* |
* Data Transfer Buffers: |
* |
* Normally drivers provide I/O buffers allocated with kmalloc() or otherwise |
* taken from the general page pool. That is provided by transfer_buffer |
* (control requests also use setup_packet), and host controller drivers |
* perform a dma mapping (and unmapping) for each buffer transferred. Those |
* mapping operations can be expensive on some platforms (perhaps using a dma |
* bounce buffer or talking to an IOMMU), |
* although they're cheap on commodity x86 and ppc hardware. |
* |
* Alternatively, drivers may pass the URB_NO_xxx_DMA_MAP transfer flags, |
* which tell the host controller driver that no such mapping is needed since |
* the device driver is DMA-aware. For example, a device driver might |
* allocate a DMA buffer with usb_buffer_alloc() or call usb_buffer_map(). |
* When these transfer flags are provided, host controller drivers will |
* attempt to use the dma addresses found in the transfer_dma and/or |
* setup_dma fields rather than determining a dma address themselves. (Note |
* that transfer_buffer and setup_packet must still be set because not all |
* host controllers use DMA, nor do virtual root hubs). |
* |
* Initialization: |
* |
* All URBs submitted must initialize dev, pipe, |
* transfer_flags (may be zero), complete, timeout (may be zero). |
* The URB_ASYNC_UNLINK transfer flag affects later invocations of |
* the usb_unlink_urb() routine. |
* |
* All URBs must also initialize |
* transfer_buffer and transfer_buffer_length. They may provide the |
* URB_SHORT_NOT_OK transfer flag, indicating that short reads are |
* to be treated as errors; that flag is invalid for write requests. |
* |
* Bulk URBs may |
* use the URB_ZERO_PACKET transfer flag, indicating that bulk OUT transfers |
* should always terminate with a short packet, even if it means adding an |
* extra zero length packet. |
* |
* Control URBs must provide a setup_packet. The setup_packet and |
* transfer_buffer may each be mapped for DMA or not, independently of |
* the other. The transfer_flags bits URB_NO_TRANSFER_DMA_MAP and |
* URB_NO_SETUP_DMA_MAP indicate which buffers have already been mapped. |
* URB_NO_SETUP_DMA_MAP is ignored for non-control URBs. |
* |
 * Interrupt URBs must provide an interval, saying how often (in milliseconds
* or, for highspeed devices, 125 microsecond units) |
* to poll for transfers. After the URB has been submitted, the interval |
* and start_frame fields reflect how the transfer was actually scheduled. |
* The polling interval may be more frequent than requested. |
* For example, some controllers have a maximum interval of 32 microseconds, |
* while others support intervals of up to 1024 microseconds. |
* Isochronous URBs also have transfer intervals. (Note that for isochronous |
* endpoints, as well as high speed interrupt endpoints, the encoding of |
* the transfer interval in the endpoint descriptor is logarithmic.) |
* |
* Isochronous URBs normally use the URB_ISO_ASAP transfer flag, telling |
* the host controller to schedule the transfer as soon as bandwidth |
* utilization allows, and then set start_frame to reflect the actual frame |
* selected during submission. Otherwise drivers must specify the start_frame |
* and handle the case where the transfer can't begin then. However, drivers |
* won't know how bandwidth is currently allocated, and while they can |
* find the current frame using usb_get_current_frame_number () they can't |
* know the range for that frame number. (Ranges for frame counter values |
* are HC-specific, and can go from 256 to 65536 frames from "now".) |
* |
* Isochronous URBs have a different data transfer model, in part because |
* the quality of service is only "best effort". Callers provide specially |
* allocated URBs, with number_of_packets worth of iso_frame_desc structures |
* at the end. Each such packet is an individual ISO transfer. Isochronous |
* URBs are normally queued, submitted by drivers to arrange that |
* transfers are at least double buffered, and then explicitly resubmitted |
* in completion handlers, so |
* that data (such as audio or video) streams at as constant a rate as the |
* host controller scheduler can support. |
* |
* Completion Callbacks: |
* |
* The completion callback is made in_interrupt(), and one of the first |
* things that a completion handler should do is check the status field. |
* The status field is provided for all URBs. It is used to report |
* unlinked URBs, and status for all non-ISO transfers. It should not |
* be examined before the URB is returned to the completion handler. |
* |
* The context field is normally used to link URBs back to the relevant |
* driver or request state. |
* |
* When completion callback is invoked for non-isochronous URBs, the |
* actual_length field tells how many bytes were transferred. |
* |
* ISO transfer status is reported in the status and actual_length fields |
* of the iso_frame_desc array, and the number of errors is reported in |
* error_count. Completion callbacks for ISO transfers will normally |
* (re)submit URBs to ensure a constant transfer rate. |
*/ |
/* See the large kerneldoc comment above for the full URB contract. */
struct urb
{
	/* private, usb core and host controller only fields in the urb */
	spinlock_t lock;		/* lock for the URB */
	atomic_t count;			/* reference count of the URB */
	void *hcpriv;			/* private data for host controller */
	struct list_head urb_list;	/* list pointer to all active urbs */
	int bandwidth;			/* bandwidth for INT/ISO request */
	/* public, documented fields in the urb that can be used by drivers */
	struct usb_device *dev;		/* (in) pointer to associated device */
	unsigned int pipe;		/* (in) pipe information */
	int status;			/* (return) non-ISO status */
	unsigned int transfer_flags;	/* (in) URB_SHORT_NOT_OK | ...*/
	void *transfer_buffer;		/* (in) associated data buffer */
	dma_addr_t transfer_dma;	/* (in) dma addr for transfer_buffer */
	int transfer_buffer_length;	/* (in) data buffer length */
	int actual_length;		/* (return) actual transfer length */
	unsigned char *setup_packet;	/* (in) setup packet (control only) */
	dma_addr_t setup_dma;		/* (in) dma addr for setup_packet */
	int start_frame;		/* (modify) start frame (INT/ISO) */
	int number_of_packets;		/* (in) number of ISO packets */
	int interval;			/* (in) transfer interval (INT/ISO) */
	int error_count;		/* (return) number of ISO errors */
	int timeout;			/* (in) timeout, in jiffies */
	void *context;			/* (in) context for completion */
	usb_complete_t complete;	/* (in) completion routine */
	struct usb_iso_packet_descriptor iso_frame_desc[0];	/* (in) ISO ONLY */
};
/* -------------------------------------------------------------------------- */ |
/** |
* usb_fill_control_urb - initializes a control urb |
* @urb: pointer to the urb to initialize. |
* @dev: pointer to the struct usb_device for this urb. |
* @pipe: the endpoint pipe |
* @setup_packet: pointer to the setup_packet buffer |
* @transfer_buffer: pointer to the transfer buffer |
* @buffer_length: length of the transfer buffer |
* @complete: pointer to the usb_complete_t function |
* @context: what to set the urb context to. |
* |
* Initializes a control urb with the proper information needed to submit |
* it to a device. |
*/ |
static inline void usb_fill_control_urb (struct urb *urb, |
struct usb_device *dev, |
unsigned int pipe, |
unsigned char *setup_packet, |
void *transfer_buffer, |
int buffer_length, |
usb_complete_t complete, |
void *context) |
{ |
spin_lock_init(&urb->lock); |
urb->dev = dev; |
urb->pipe = pipe; |
urb->setup_packet = setup_packet; |
urb->transfer_buffer = transfer_buffer; |
urb->transfer_buffer_length = buffer_length; |
urb->complete = complete; |
urb->context = context; |
} |
/** |
* usb_fill_bulk_urb - macro to help initialize a bulk urb |
* @urb: pointer to the urb to initialize. |
* @dev: pointer to the struct usb_device for this urb. |
* @pipe: the endpoint pipe |
* @transfer_buffer: pointer to the transfer buffer |
* @buffer_length: length of the transfer buffer |
* @complete: pointer to the usb_complete_t function |
* @context: what to set the urb context to. |
* |
* Initializes a bulk urb with the proper information needed to submit it |
* to a device. |
*/ |
static inline void usb_fill_bulk_urb (struct urb *urb, |
struct usb_device *dev, |
unsigned int pipe, |
void *transfer_buffer, |
int buffer_length, |
usb_complete_t complete, |
void *context) |
{ |
spin_lock_init(&urb->lock); |
urb->dev = dev; |
urb->pipe = pipe; |
urb->transfer_buffer = transfer_buffer; |
urb->transfer_buffer_length = buffer_length; |
urb->complete = complete; |
urb->context = context; |
} |
/** |
* usb_fill_int_urb - macro to help initialize a interrupt urb |
* @urb: pointer to the urb to initialize. |
* @dev: pointer to the struct usb_device for this urb. |
* @pipe: the endpoint pipe |
* @transfer_buffer: pointer to the transfer buffer |
* @buffer_length: length of the transfer buffer |
* @complete: pointer to the usb_complete_t function |
* @context: what to set the urb context to. |
* @interval: what to set the urb interval to, encoded like |
* the endpoint descriptor's bInterval value. |
* |
* Initializes a interrupt urb with the proper information needed to submit |
* it to a device. |
* Note that high speed interrupt endpoints use a logarithmic encoding of |
* the endpoint interval, and express polling intervals in microframes |
* (eight per millisecond) rather than in frames (one per millisecond). |
*/ |
static inline void usb_fill_int_urb (struct urb *urb, |
struct usb_device *dev, |
unsigned int pipe, |
void *transfer_buffer, |
int buffer_length, |
usb_complete_t complete, |
void *context, |
int interval) |
{ |
spin_lock_init(&urb->lock); |
urb->dev = dev; |
urb->pipe = pipe; |
urb->transfer_buffer = transfer_buffer; |
urb->transfer_buffer_length = buffer_length; |
urb->complete = complete; |
urb->context = context; |
if (dev->speed == USB_SPEED_HIGH) |
urb->interval = 1 << (interval - 1); |
else |
urb->interval = interval; |
urb->start_frame = -1; |
} |
/* urb lifecycle: usb_alloc_urb()/usb_free_urb() allocate and release;
 * usb_get_urb()/usb_put_urb() follow get/put naming, which suggests
 * reference counting -- see usbcore for the exact contract */
extern void usb_init_urb(struct urb *urb);
extern struct urb *usb_alloc_urb(int iso_packets, int mem_flags);
extern void usb_free_urb(struct urb *urb);
#define usb_put_urb usb_free_urb
extern struct urb *usb_get_urb(struct urb *urb);
extern int usb_submit_urb(struct urb *urb, int mem_flags);
extern int usb_unlink_urb(struct urb *urb);
/* DMA-consistent buffer allocation and urb/scatterlist mapping helpers
 * (pair with the URB_NO_*_DMA_MAP transfer flags above) */
#define HAVE_USB_BUFFERS
void *usb_buffer_alloc (struct usb_device *dev, size_t size,
	int mem_flags, dma_addr_t *dma);
void usb_buffer_free (struct usb_device *dev, size_t size,
	void *addr, dma_addr_t dma);
struct urb *usb_buffer_map (struct urb *urb);
void usb_buffer_dmasync (struct urb *urb);
void usb_buffer_unmap (struct urb *urb);
struct scatterlist;
int usb_buffer_map_sg (struct usb_device *dev, unsigned pipe,
		struct scatterlist *sg, int nents);
void usb_buffer_dmasync_sg (struct usb_device *dev, unsigned pipe,
		struct scatterlist *sg, int n_hw_ents);
void usb_buffer_unmap_sg (struct usb_device *dev, unsigned pipe,
		struct scatterlist *sg, int n_hw_ents);
/*-------------------------------------------------------------------*
 * SYNCHRONOUS CALL SUPPORT                                          *
 *-------------------------------------------------------------------*/
/* blocking control/bulk message helpers */
extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
	__u8 request, __u8 requesttype, __u16 value, __u16 index,
	void *data, __u16 size, int timeout);
extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
	void *data, int len, int *actual_length,
	int timeout);
/* wrappers around usb_control_msg() for the most common standard requests */
extern int usb_get_descriptor(struct usb_device *dev, unsigned char desctype,
	unsigned char descindex, void *buf, int size);
extern int usb_get_device_descriptor(struct usb_device *dev);
extern int usb_get_status(struct usb_device *dev,
	int type, int target, void *data);
extern int usb_get_string(struct usb_device *dev,
	unsigned short langid, unsigned char index, void *buf, int size);
extern int usb_string(struct usb_device *dev, int index,
	char *buf, size_t size);
/* wrappers that also update important state inside usbcore */
extern int usb_clear_halt(struct usb_device *dev, int pipe);
extern int usb_reset_configuration(struct usb_device *dev);
extern int usb_set_configuration(struct usb_device *dev, int configuration);
extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate);
/* |
* timeouts, in seconds, used for sending/receiving control messages |
* they typically complete within a few frames (msec) after they're issued |
* USB identifies 5 second timeouts, maybe more in a few cases, and a few |
* slow devices (like some MGE Ellipse UPSes) actually push that limit. |
*/ |
#define USB_CTRL_GET_TIMEOUT 5 |
#define USB_CTRL_SET_TIMEOUT 5 |
/** |
* struct usb_sg_request - support for scatter/gather I/O |
* @status: zero indicates success, else negative errno |
* @bytes: counts bytes transferred. |
* |
* These requests are initialized using usb_sg_init(), and then are used |
* as request handles passed to usb_sg_wait() or usb_sg_cancel(). Most |
* members of the request object aren't for driver access. |
* |
* The status and bytecount values are valid only after usb_sg_wait() |
* returns. If the status is zero, then the bytecount matches the total |
* from the request. |
* |
* After an error completion, drivers may need to clear a halt condition |
* on the endpoint. |
*/ |
struct usb_sg_request {
	int status;		/* zero or negative errno (see kerneldoc above) */
	size_t bytes;		/* bytes transferred; valid after usb_sg_wait() */
	/*
	 * members below are private to usbcore,
	 * and are not provided for driver access!
	 */
	spinlock_t lock;
	struct usb_device *dev;
	int pipe;
	struct scatterlist *sg;
	int nents;
	int entries;
	struct urb **urbs;
	int count;
	struct completion complete;
};
/* initialize a scatter/gather request; run it to completion with
 * usb_sg_wait() or abort it with usb_sg_cancel() */
int usb_sg_init (
	struct usb_sg_request *io,
	struct usb_device *dev,
	unsigned pipe,
	unsigned period,
	struct scatterlist *sg,
	int nents,
	size_t length,
	int mem_flags
);
void usb_sg_cancel (struct usb_sg_request *io);
void usb_sg_wait (struct usb_sg_request *io);
/* -------------------------------------------------------------------------- */ |
/* |
* Calling this entity a "pipe" is glorifying it. A USB pipe |
* is something embarrassingly simple: it basically consists |
* of the following information: |
* - device number (7 bits) |
* - endpoint number (4 bits) |
* - current Data0/1 state (1 bit) [Historical; now gone] |
* - direction (1 bit) |
* - speed (1 bit) [Historical and specific to USB 1.1; now gone.] |
* - max packet size (2 bits: 8, 16, 32 or 64) [Historical; now gone.] |
* - pipe type (2 bits: control, interrupt, bulk, isochronous) |
* |
* That's 18 bits. Really. Nothing more. And the USB people have |
* documented these eighteen bits as some kind of glorious |
* virtual data structure. |
* |
* Let's not fall in that trap. We'll just encode it as a simple |
* unsigned int. The encoding is: |
* |
* - max size: bits 0-1 [Historical; now gone.] |
* - direction: bit 7 (0 = Host-to-Device [Out], |
* 1 = Device-to-Host [In] ... |
* like endpoint bEndpointAddress) |
* - device: bits 8-14 ... bit positions known to uhci-hcd |
* - endpoint: bits 15-18 ... bit positions known to uhci-hcd |
* - Data0/1: bit 19 [Historical; now gone. ] |
* - lowspeed: bit 26 [Historical; now gone. ] |
* - pipe type: bits 30-31 (00 = isochronous, 01 = interrupt, |
* 10 = control, 11 = bulk) |
* |
* Why? Because it's arbitrary, and whatever encoding we select is really |
* up to us. This one happens to share a lot of bit positions with the UHCI |
* specification, so that much of the uhci driver can just mask the bits |
* appropriately. |
*/ |
/* NOTE: these are not the standard USB_ENDPOINT_XFER_* values!! */
/* pipe type values, stored in bits 30-31 of the pipe encoding (see above) */
#define PIPE_ISOCHRONOUS 0
#define PIPE_INTERRUPT 1
#define PIPE_CONTROL 2
#define PIPE_BULK 3
/* max packet size for the endpoint/direction selected by 'pipe';
 * beware: macro arguments are evaluated more than once */
#define usb_maxpacket(dev, pipe, out) (out \
	? (dev)->epmaxpacketout[usb_pipeendpoint(pipe)] \
	: (dev)->epmaxpacketin [usb_pipeendpoint(pipe)] )
/* extract the fields of the pipe encoding documented in the block above */
#define usb_pipein(pipe) ((pipe) & USB_DIR_IN)
#define usb_pipeout(pipe) (!usb_pipein(pipe))
#define usb_pipedevice(pipe) (((pipe) >> 8) & 0x7f)
#define usb_pipeendpoint(pipe) (((pipe) >> 15) & 0xf)
#define usb_pipetype(pipe) (((pipe) >> 30) & 3)
/* transfer-type predicates */
#define usb_pipeisoc(pipe) (usb_pipetype((pipe)) == PIPE_ISOCHRONOUS)
#define usb_pipeint(pipe) (usb_pipetype((pipe)) == PIPE_INTERRUPT)
#define usb_pipecontrol(pipe) (usb_pipetype((pipe)) == PIPE_CONTROL)
#define usb_pipebulk(pipe) (usb_pipetype((pipe)) == PIPE_BULK)
/* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */
#define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1)
#define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep)))
#define usb_settoggle(dev, ep, out, bit) ((dev)->toggle[out] = ((dev)->toggle[out] & ~(1 << (ep))) | ((bit) << (ep)))
/* Endpoint halt control/status ... likewise USE WITH CAUTION */
#define usb_endpoint_running(dev, ep, out) ((dev)->halted[out] &= ~(1 << (ep)))
#define usb_endpoint_halted(dev, ep, out) ((dev)->halted[out] & (1 << (ep)))
static inline unsigned int __create_pipe(struct usb_device *dev, unsigned int endpoint) |
{ |
return (dev->devnum << 8) | (endpoint << 15); |
} |
/* Create various pipes... */
/* "snd" variants address OUT endpoints; "rcv" variants OR in USB_DIR_IN */
#define usb_sndctrlpipe(dev,endpoint) ((PIPE_CONTROL << 30) | __create_pipe(dev,endpoint))
#define usb_rcvctrlpipe(dev,endpoint) ((PIPE_CONTROL << 30) | __create_pipe(dev,endpoint) | USB_DIR_IN)
#define usb_sndisocpipe(dev,endpoint) ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev,endpoint))
#define usb_rcvisocpipe(dev,endpoint) ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev,endpoint) | USB_DIR_IN)
#define usb_sndbulkpipe(dev,endpoint) ((PIPE_BULK << 30) | __create_pipe(dev,endpoint))
#define usb_rcvbulkpipe(dev,endpoint) ((PIPE_BULK << 30) | __create_pipe(dev,endpoint) | USB_DIR_IN)
#define usb_sndintpipe(dev,endpoint) ((PIPE_INTERRUPT << 30) | __create_pipe(dev,endpoint))
#define usb_rcvintpipe(dev,endpoint) ((PIPE_INTERRUPT << 30) | __create_pipe(dev,endpoint) | USB_DIR_IN)
/* -------------------------------------------------------------------------- */ |
/* |
* Debugging and troubleshooting/diagnostic helpers. |
*/ |
void usb_show_device_descriptor(struct usb_device_descriptor *); |
void usb_show_config_descriptor(struct usb_config_descriptor *); |
void usb_show_interface_descriptor(struct usb_interface_descriptor *); |
void usb_show_endpoint_descriptor(struct usb_endpoint_descriptor *); |
void usb_show_device(struct usb_device *); |
void usb_show_string(struct usb_device *dev, char *id, int index); |
#ifdef DEBUG |
#define dbg(format, arg...) printk(KERN_DEBUG "%s: " format "\n" , __FILE__ , ## arg) |
#else |
#define dbg(format, arg...) do {} while (0) |
#endif |
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n" , __FILE__ , ## arg) |
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n" , __FILE__ , ## arg) |
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n" , __FILE__ , ## arg) |
#endif /* __KERNEL__ */ |
#endif |
#ifndef __LINUX_USB_H |
#define __LINUX_USB_H |
#include <linux/mod_devicetable.h> |
#include <linux/usb_ch9.h> |
#define USB_MAJOR 180 |
#ifdef __KERNEL__ |
#include <linux/config.h> |
#include <linux/errno.h> /* for -ENODEV */ |
#include <linux/delay.h> /* for mdelay() */ |
#include <linux/interrupt.h> /* for in_interrupt() */ |
#include <linux/list.h> /* for struct list_head */ |
#include <linux/device.h> /* for struct device */ |
#include <linux/fs.h> /* for struct file_operations */ |
#include <linux/completion.h> /* for struct completion */ |
#include <linux/sched.h> /* for current && schedule_timeout */ |
extern int wait_ms26(unsigned long timeout); |
/*
 * wait_ms - sleep for (at least) @ms milliseconds.
 *
 * Historically this slept via schedule_timeout() outside interrupt
 * context and busy-waited with mdelay() inside it; under this port
 * both cases are delegated to wait_ms26().
 */
static __inline__ void wait_ms(unsigned int ms)
{
	wait_ms26(ms);
}
struct usb_device; |
/*-------------------------------------------------------------------------*/ |
/* |
* Host-side wrappers for standard USB descriptors ... these are parsed |
* from the data provided by devices. Parsing turns them from a flat |
* sequence of descriptors into a hierarchy: |
* |
* - devices have one (usually) or more configs; |
* - configs have one (often) or more interfaces; |
* - interfaces have one (usually) or more settings; |
* - each interface setting has zero or (usually) more endpoints. |
* |
* And there might be other descriptors mixed in with those. |
* |
* Devices may also have class-specific or vendor-specific descriptors. |
*/ |
/* host-side wrapper for parsed endpoint descriptors */ |
struct usb_host_endpoint {
	struct usb_endpoint_descriptor desc;	/* the parsed endpoint descriptor */
	unsigned char *extra;   /* Extra descriptors */
	int extralen;		/* byte length of the extra[] blob */
};
/* host-side wrapper for one interface setting's parsed descriptors */ |
struct usb_host_interface {
	struct usb_interface_descriptor desc;	/* this altsetting's descriptor */
	/* array of desc.bNumEndpoint endpoints associated with this
	 * interface setting. these will be in no particular order.
	 */
	struct usb_host_endpoint *endpoint;
	unsigned char *extra;   /* Extra descriptors */
	int extralen;		/* byte length of the extra[] blob */
};
/** |
* struct usb_interface - what usb device drivers talk to |
* @altsetting: array of interface descriptors, one for each alternate |
* setting that may be selected. Each one includes a set of |
 * endpoint configurations and will be in numeric order,
* 0..num_altsetting. |
* @num_altsetting: number of altsettings defined. |
* @act_altsetting: index of current altsetting. this number is always |
* less than num_altsetting. after the device is configured, each |
* interface uses its default setting of zero. |
* @driver: the USB driver that is bound to this interface. |
* @minor: the minor number assigned to this interface, if this |
* interface is bound to a driver that uses the USB major number. |
* If this interface does not use the USB major, this field should |
* be unused. The driver should set this value in the probe() |
* function of the driver, after it has been assigned a minor |
* number from the USB core by calling usb_register_dev(). |
* @dev: driver model's view of this device |
* @class_dev: driver model's class view of this device. |
* |
* USB device drivers attach to interfaces on a physical device. Each |
* interface encapsulates a single high level function, such as feeding |
* an audio stream to a speaker or reporting a change in a volume control. |
* Many USB devices only have one interface. The protocol used to talk to |
* an interface's endpoints can be defined in a usb "class" specification, |
* or by a product's vendor. The (default) control endpoint is part of |
* every interface, but is never listed among the interface's descriptors. |
* |
* The driver that is bound to the interface can use standard driver model |
* calls such as dev_get_drvdata() on the dev member of this structure. |
* |
* Each interface may have alternate settings. The initial configuration |
* of a device sets the first of these, but the device driver can change |
* that setting using usb_set_interface(). Alternate settings are often |
 * used to control the use of periodic endpoints, such as by having
* different endpoints use different amounts of reserved USB bandwidth. |
* All standards-conformant USB devices that use isochronous endpoints |
* will use them in non-default settings. |
*/ |
struct usb_interface {
	/* array of alternate settings for this interface.
	 * these will be in numeric order, 0..num_altsetting
	 */
	struct usb_host_interface *altsetting;
	unsigned act_altsetting;	/* active alternate setting */
	unsigned num_altsetting;	/* number of alternate settings */
	struct usb_driver *driver;	/* USB driver bound to this interface */
	int minor;			/* minor number this interface is bound to */
	struct device dev;		/* interface specific device info */
	struct class_device *class_dev;	/* driver model's class view */
};
/* driver-model conversions: recover the USB wrappers from a struct device */
#define to_usb_interface(d) container_of(d, struct usb_interface, dev)
/* an interface's parent device is the usb_device it belongs to;
 * 'intf' is now parenthesized so expression arguments expand safely */
#define interface_to_usbdev(intf) \
	container_of((intf)->dev.parent, struct usb_device, dev)
static inline void *usb_get_intfdata (struct usb_interface *intf) |
{ |
return dev_get_drvdata (&intf->dev); |
} |
static inline void usb_set_intfdata (struct usb_interface *intf, void *data) |
{ |
dev_set_drvdata(&intf->dev, data); |
} |
/* this maximum is arbitrary */ |
#define USB_MAXINTERFACES 32 |
/* USB_DT_CONFIG: Configuration descriptor information. |
* |
* USB_DT_OTHER_SPEED_CONFIG is the same descriptor, except that the |
* descriptor type is different. Highspeed-capable devices can look |
* different depending on what speed they're currently running. Only |
* devices with a USB_DT_DEVICE_QUALIFIER have an OTHER_SPEED_CONFIG. |
*/ |
struct usb_host_config {
	struct usb_config_descriptor desc;	/* the parsed config descriptor */
	/* the interfaces associated with this configuration
	 * these will be in numeric order, 0..desc.bNumInterfaces
	 */
	struct usb_interface *interface[USB_MAXINTERFACES];
	unsigned char *extra;   /* Extra descriptors */
	int extralen;		/* byte length of the extra[] blob */
};
// FIXME remove; exported only for drivers/usb/misc/auerswald.c
// prefer usb_device->epnum[0..31] |
extern struct usb_endpoint_descriptor * |
usb_epnum_to_ep_desc(struct usb_device *dev, unsigned epnum); |
int __usb_get_extra_descriptor(char *buffer, unsigned size, |
unsigned char type, void **ptr); |
/* convenience wrapper: searches (ifpoint)->extra / (ifpoint)->extralen
 * for a descriptor of the given type; the result address is stored
 * through ptr (see __usb_get_extra_descriptor for the return code) */
#define usb_get_extra_descriptor(ifpoint,type,ptr)\
	__usb_get_extra_descriptor((ifpoint)->extra,(ifpoint)->extralen,\
	type,(void**)ptr)
/* -------------------------------------------------------------------------- */ |
struct usb_operations; |
/* USB device number allocation bitmap */ |
struct usb_devmap {
	/* one bit per possible device address, 0..127 */
	unsigned long devicemap[128 / (8*sizeof(unsigned long))];
};
/*
 * Allocated per bus (tree of devices) we have:
 */
struct usb_bus {
	struct device *controller;	/* host/master side hardware */
	int busnum;			/* Bus number (in order of reg) */
	char *bus_name;			/* stable id (PCI slot_name etc) */
	int devnum_next;		/* Next open device number in round-robin allocation */
	struct usb_devmap devmap;	/* device address allocation map */
	struct usb_operations *op;	/* Operations (specific to the HC) */
	struct usb_device *root_hub;	/* Root hub */
	struct list_head bus_list;	/* list of busses */
	void *hcpriv;			/* Host Controller private data */
	int bandwidth_allocated;	/* on this bus: how much of the time
					 * reserved for periodic (intr/iso)
					 * requests is used, on average?
					 * Units: microseconds/frame.
					 * Limits: Full/low speed reserve 90%,
					 * while high speed reserves 80%.
					 */
	int bandwidth_int_reqs;		/* number of Interrupt requests */
	int bandwidth_isoc_reqs;	/* number of Isoc. requests */
	struct dentry *usbfs_dentry;	/* usbfs dentry entry for the bus */
	struct dentry *usbdevfs_dentry;	/* usbdevfs dentry entry for the bus */
	struct class_device class_dev;	/* class device for this bus */
	void (*release)(struct usb_bus *bus);	/* function to destroy this bus's memory */
};
/* map an embedded class_dev back to its containing usb_bus */
#define to_usb_bus(d) container_of(d, struct usb_bus, class_dev)
/* -------------------------------------------------------------------------- */ |
/* This is arbitrary. |
* From USB 2.0 spec Table 11-13, offset 7, a hub can |
* have up to 255 ports. The most yet reported is 10. |
*/ |
#define USB_MAXCHILDREN (16) |
struct usb_tt; |
struct usb_device {
	int devnum;		/* Address on USB bus */
	char devpath [16];	/* Use in messages: /port/port/... */
	enum usb_device_state state;	/* configured, not attached, etc */
	enum usb_device_speed speed;	/* high/full/low (or error) */
	struct usb_tt *tt;	/* low/full speed dev, highspeed hub */
	int ttport;		/* device port on that tt hub */
	struct semaphore serialize;	/* NOTE(review): appears to serialize
					 * usbcore operations on this device;
					 * confirm intended lock scope */
	unsigned int toggle[2];	/* one bit for each endpoint ([0] = IN, [1] = OUT) */
	unsigned int halted[2];	/* endpoint halts; one bit per endpoint # & direction; */
				/* [0] = IN, [1] = OUT */
	int epmaxpacketin[16];	/* INput endpoint specific maximums */
	int epmaxpacketout[16];	/* OUTput endpoint specific maximums */
	struct usb_device *parent;	/* our hub, unless we're the root */
	struct usb_bus *bus;	/* Bus we're part of */
	struct device dev;	/* Generic device interface */
	struct usb_device_descriptor descriptor;	/* Descriptor */
	struct usb_host_config *config;	/* All of the configs */
	struct usb_host_config *actconfig;	/* the active configuration */
	char **rawdescriptors;	/* Raw descriptors for each config */
	int have_langid;	/* whether string_langid is valid yet */
	int string_langid;	/* language ID for strings */
	void *hcpriv;		/* Host Controller private data */
	struct list_head filelist;	/* NOTE(review): presumably open usbfs
					 * file handles; verify against usbcore */
	struct dentry *usbfs_dentry;	/* usbfs dentry entry for the device */
	struct dentry *usbdevfs_dentry;	/* usbdevfs dentry entry for the device */
	/*
	 * Child devices - these can be either new devices
	 * (if this is a hub device), or different instances
	 * of this same device.
	 *
	 * Each instance needs its own set of data structures.
	 */
	int maxchild;		/* Number of ports if hub */
	struct usb_device *children[USB_MAXCHILDREN];	/* one slot per port */
};
#define to_usb_device(d) container_of(d, struct usb_device, dev) |
extern struct usb_device *usb_alloc_dev(struct usb_device *parent, struct usb_bus *); |
extern struct usb_device *usb_get_dev(struct usb_device *dev); |
extern void usb_put_dev(struct usb_device *dev); |
/* mostly for devices emulating SCSI over USB */ |
extern int usb_reset_device(struct usb_device *dev); |
extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id); |
/* for drivers using iso endpoints */ |
extern int usb_get_current_frame_number (struct usb_device *usb_dev); |
/* used these for multi-interface device registration */ |
extern int usb_driver_claim_interface(struct usb_driver *driver, |
struct usb_interface *iface, void* priv); |
extern int usb_interface_claimed(struct usb_interface *iface); |
extern void usb_driver_release_interface(struct usb_driver *driver, |
struct usb_interface *iface); |
const struct usb_device_id *usb_match_id(struct usb_interface *interface, |
const struct usb_device_id *id); |
extern struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor); |
extern struct usb_interface *usb_ifnum_to_if(struct usb_device *dev, unsigned ifnum); |
/** |
* usb_make_path - returns stable device path in the usb tree |
* @dev: the device whose path is being constructed |
* @buf: where to put the string |
* @size: how big is "buf"? |
* |
* Returns length of the string (> 0) or negative if size was too small. |
* |
* This identifier is intended to be "stable", reflecting physical paths in |
* hardware such as physical bus addresses for host controllers or ports on |
* USB hubs. That makes it stay the same until systems are physically |
* reconfigured, by re-cabling a tree of USB devices or by moving USB host |
* controllers. Adding and removing devices, including virtual root hubs |
 * in host controller driver modules, does not change these path identifiers;
* neither does rebooting or re-enumerating. These are more useful identifiers |
* than changeable ("unstable") ones like bus numbers or device addresses. |
* |
* With a partial exception for devices connected to USB 2.0 root hubs, these |
* identifiers are also predictable. So long as the device tree isn't changed, |
* plugging any USB device into a given hub port always gives it the same path. |
* Because of the use of "companion" controllers, devices connected to ports on |
* USB 2.0 root hubs (EHCI host controllers) will get one path ID if they are |
* high speed, and a different one if they are full or low speed. |
*/ |
/* Build the stable "usb-<bus>-<devpath>" identifier described in the
 * kernel-doc above; returns -1 when @size is too small for the result. */
static inline int usb_make_path (struct usb_device *dev, char *buf, size_t size)
{
	int len;

	len = snprintf26(buf, size, "usb-%s-%s", dev->bus->bus_name, dev->devpath);
	if (len >= (int)size)
		return -1;
	return len;
}
/*-------------------------------------------------------------------------*/ |
/* convenience combinations of the individual USB_DEVICE_ID_MATCH_* bits */
#define USB_DEVICE_ID_MATCH_DEVICE (USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT)
#define USB_DEVICE_ID_MATCH_DEV_RANGE (USB_DEVICE_ID_MATCH_DEV_LO | USB_DEVICE_ID_MATCH_DEV_HI)
#define USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_RANGE)
#define USB_DEVICE_ID_MATCH_DEV_INFO \
	(USB_DEVICE_ID_MATCH_DEV_CLASS | USB_DEVICE_ID_MATCH_DEV_SUBCLASS | USB_DEVICE_ID_MATCH_DEV_PROTOCOL)
#define USB_DEVICE_ID_MATCH_INT_INFO \
	(USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS | USB_DEVICE_ID_MATCH_INT_PROTOCOL)
/** |
* USB_DEVICE - macro used to describe a specific usb device |
* @vend: the 16 bit USB Vendor ID |
* @prod: the 16 bit USB Product ID |
* |
* This macro is used to create a struct usb_device_id that matches a |
* specific device. |
*/ |
#define USB_DEVICE(vend,prod) \ |
.match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = (vend), .idProduct = (prod) |
/** |
* USB_DEVICE_VER - macro used to describe a specific usb device with a version range |
* @vend: the 16 bit USB Vendor ID |
* @prod: the 16 bit USB Product ID |
* @lo: the bcdDevice_lo value |
* @hi: the bcdDevice_hi value |
* |
* This macro is used to create a struct usb_device_id that matches a |
* specific device, with a version range. |
*/ |
#define USB_DEVICE_VER(vend,prod,lo,hi) \ |
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, .idVendor = (vend), .idProduct = (prod), .bcdDevice_lo = (lo), .bcdDevice_hi = (hi) |
/** |
* USB_DEVICE_INFO - macro used to describe a class of usb devices |
* @cl: bDeviceClass value |
* @sc: bDeviceSubClass value |
* @pr: bDeviceProtocol value |
* |
* This macro is used to create a struct usb_device_id that matches a |
* specific class of devices. |
*/ |
#define USB_DEVICE_INFO(cl,sc,pr) \ |
.match_flags = USB_DEVICE_ID_MATCH_DEV_INFO, .bDeviceClass = (cl), .bDeviceSubClass = (sc), .bDeviceProtocol = (pr) |
/** |
* USB_INTERFACE_INFO - macro used to describe a class of usb interfaces |
* @cl: bInterfaceClass value |
* @sc: bInterfaceSubClass value |
* @pr: bInterfaceProtocol value |
* |
* This macro is used to create a struct usb_device_id that matches a |
* specific class of interfaces. |
*/ |
#define USB_INTERFACE_INFO(cl,sc,pr) \ |
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO, .bInterfaceClass = (cl), .bInterfaceSubClass = (sc), .bInterfaceProtocol = (pr) |
/* -------------------------------------------------------------------------- */ |
/** |
* struct usb_driver - identifies USB driver to usbcore |
* @owner: Pointer to the module owner of this driver; initialize |
* it using THIS_MODULE. |
* @name: The driver name should be unique among USB drivers, |
* and should normally be the same as the module name. |
* @probe: Called to see if the driver is willing to manage a particular |
* interface on a device. If it is, probe returns zero and uses |
* dev_set_drvdata() to associate driver-specific data with the |
* interface. It may also use usb_set_interface() to specify the |
* appropriate altsetting. If unwilling to manage the interface, |
* return a negative errno value. |
* @disconnect: Called when the interface is no longer accessible, usually |
* because its device has been (or is being) disconnected or the |
* driver module is being unloaded. |
* @ioctl: Used for drivers that want to talk to userspace through |
* the "usbfs" filesystem. This lets devices provide ways to |
* expose information to user space regardless of where they |
* do (or don't) show up otherwise in the filesystem. |
* @suspend: Called when the device is going to be suspended by the system. |
* @resume: Called when the device is being resumed by the system. |
* @id_table: USB drivers use ID table to support hotplugging. |
* Export this with MODULE_DEVICE_TABLE(usb,...). This must be set |
* or your driver's probe function will never get called. |
* @driver: the driver model core driver structure. |
* @serialize: a semaphore used to serialize access to this driver. Used |
* in the probe and disconnect functions. Only the USB core should use |
* this lock. |
* |
* USB drivers must provide a name, probe() and disconnect() methods, |
* and an id_table. Other driver fields are optional. |
* |
* The id_table is used in hotplugging. It holds a set of descriptors, |
* and specialized data may be associated with each entry. That table |
* is used by both user and kernel mode hotplugging support. |
* |
* The probe() and disconnect() methods are called in a context where |
* they can sleep, but they should avoid abusing the privilege. Most |
* work to connect to a device should be done when the device is opened, |
* and undone at the last close. The disconnect code needs to address |
* concurrency issues with respect to open() and close() methods, as |
* well as forcing all pending I/O requests to complete (by unlinking |
* them as necessary, and blocking until the unlinks complete). |
*/ |
struct usb_driver {
	struct module *owner;		/* usually THIS_MODULE (see kernel-doc) */
	const char *name;		/* unique among USB drivers */
	int (*probe) (struct usb_interface *intf,
		      const struct usb_device_id *id);	/* 0 = claimed, negative errno = declined */
	void (*disconnect) (struct usb_interface *intf);
	int (*ioctl) (struct usb_interface *intf, unsigned int code, void *buf);	/* usbfs hook */
	int (*suspend) (struct usb_interface *intf, u32 state);
	int (*resume) (struct usb_interface *intf);
	const struct usb_device_id *id_table;	/* required for hotplug/probe matching */
	struct device_driver driver;	/* driver model core structure */
	struct semaphore serialize;	/* USB-core internal; see kernel-doc above */
};
#define to_usb_driver(d) container_of(d, struct usb_driver, driver) |
extern struct bus_type usb_bus_type; |
/** |
* struct usb_class_driver - identifies a USB driver that wants to use the USB major number |
* @name: devfs name for this driver. Will also be used by the driver |
* class code to create a usb class device. |
* @fops: pointer to the struct file_operations of this driver. |
* @mode: the mode for the devfs file to be created for this driver. |
* @minor_base: the start of the minor range for this driver. |
* |
* This structure is used for the usb_register_dev() and |
 * usb_unregister_dev() functions, to consolidate a number of the
 * parameters used for them.
*/ |
struct usb_class_driver {
	char *name;			/* devfs / class device name */
	struct file_operations *fops;	/* this driver's file operations */
	mode_t mode;			/* mode for the devfs file */
	int minor_base;			/* start of the driver's minor range */
};
/* |
* use these in module_init()/module_exit() |
* and don't forget MODULE_DEVICE_TABLE(usb, ...) |
*/ |
extern int usb_register(struct usb_driver *); |
extern void usb_deregister(struct usb_driver *); |
extern int usb_register_dev(struct usb_interface *intf, |
struct usb_class_driver *class_driver); |
extern void usb_deregister_dev(struct usb_interface *intf, |
struct usb_class_driver *class_driver); |
extern int usb_disabled(void); |
/* -------------------------------------------------------------------------- */ |
/* |
* URB support, for asynchronous request completions |
*/ |
/* |
* urb->transfer_flags: |
*/ |
#define URB_SHORT_NOT_OK 0x0001 /* report short reads as errors */ |
#define URB_ISO_ASAP 0x0002 /* iso-only, urb->start_frame ignored */ |
#define URB_NO_TRANSFER_DMA_MAP 0x0004 /* urb->transfer_dma valid on submit */ |
#define URB_NO_SETUP_DMA_MAP 0x0008 /* urb->setup_dma valid on submit */ |
#define URB_ASYNC_UNLINK 0x0010 /* usb_unlink_urb() returns asap */ |
#define URB_NO_FSBR 0x0020 /* UHCI-specific */ |
#define URB_ZERO_PACKET 0x0040 /* Finish bulk OUTs with short packet */ |
#define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt needed */ |
/* per-packet record for isochronous transfers (urb->iso_frame_desc[]) */
struct usb_iso_packet_descriptor {
	unsigned int offset;		/* byte offset into the urb's transfer buffer */
	unsigned int length;		/* expected length */
	unsigned int actual_length;	/* bytes actually transferred */
	unsigned int status;		/* per-packet completion status */
};
struct urb; |
struct pt_regs; |
typedef void (*usb_complete_t)(struct urb *, struct pt_regs *); |
/** |
* struct urb - USB Request Block |
* @urb_list: For use by current owner of the URB. |
* @pipe: Holds endpoint number, direction, type, and more. |
* Create these values with the eight macros available; |
* usb_{snd,rcv}TYPEpipe(dev,endpoint), where the type is "ctrl" |
* (control), "bulk", "int" (interrupt), or "iso" (isochronous). |
* For example usb_sndbulkpipe() or usb_rcvintpipe(). Endpoint |
* numbers range from zero to fifteen. Note that "in" endpoint two |
* is a different endpoint (and pipe) from "out" endpoint two. |
* The current configuration controls the existence, type, and |
* maximum packet size of any given endpoint. |
* @dev: Identifies the USB device to perform the request. |
* @status: This is read in non-iso completion functions to get the |
* status of the particular request. ISO requests only use it |
* to tell whether the URB was unlinked; detailed status for |
* each frame is in the fields of the iso_frame-desc. |
* @transfer_flags: A variety of flags may be used to affect how URB |
* submission, unlinking, or operation are handled. Different |
* kinds of URB can use different flags. |
* @transfer_buffer: This identifies the buffer to (or from) which |
* the I/O request will be performed (unless URB_NO_TRANSFER_DMA_MAP |
* is set). This buffer must be suitable for DMA; allocate it with |
* kmalloc() or equivalent. For transfers to "in" endpoints, contents |
* of this buffer will be modified. This buffer is used for data |
* phases of control transfers. |
* @transfer_dma: When transfer_flags includes URB_NO_TRANSFER_DMA_MAP, |
* the device driver is saying that it provided this DMA address, |
* which the host controller driver should use in preference to the |
* transfer_buffer. |
* @transfer_buffer_length: How big is transfer_buffer. The transfer may |
* be broken up into chunks according to the current maximum packet |
* size for the endpoint, which is a function of the configuration |
* and is encoded in the pipe. When the length is zero, neither |
* transfer_buffer nor transfer_dma is used. |
* @actual_length: This is read in non-iso completion functions, and |
* it tells how many bytes (out of transfer_buffer_length) were |
* transferred. It will normally be the same as requested, unless |
* either an error was reported or a short read was performed. |
* The URB_SHORT_NOT_OK transfer flag may be used to make such |
* short reads be reported as errors. |
* @setup_packet: Only used for control transfers, this points to eight bytes |
* of setup data. Control transfers always start by sending this data |
* to the device. Then transfer_buffer is read or written, if needed. |
* @setup_dma: For control transfers with URB_NO_SETUP_DMA_MAP set, the |
* device driver has provided this DMA address for the setup packet. |
* The host controller driver should use this in preference to |
* setup_packet. |
* @start_frame: Returns the initial frame for interrupt or isochronous |
* transfers. |
* @number_of_packets: Lists the number of ISO transfer buffers. |
* @interval: Specifies the polling interval for interrupt or isochronous |
 * transfers. The units are frames (milliseconds) for full and low
* speed devices, and microframes (1/8 millisecond) for highspeed ones. |
* @error_count: Returns the number of ISO transfers that reported errors. |
* @context: For use in completion functions. This normally points to |
* request-specific driver context. |
* @complete: Completion handler. This URB is passed as the parameter to the |
* completion function. The completion function may then do what |
* it likes with the URB, including resubmitting or freeing it. |
* @iso_frame_desc: Used to provide arrays of ISO transfer buffers and to |
* collect the transfer status for each buffer. |
* @timeout: If set to zero, the urb will never timeout. Otherwise this is |
* the time in jiffies that this urb will timeout in. |
* |
* This structure identifies USB transfer requests. URBs must be allocated by |
* calling usb_alloc_urb() and freed with a call to usb_free_urb(). |
* Initialization may be done using various usb_fill_*_urb() functions. URBs |
* are submitted using usb_submit_urb(), and pending requests may be canceled |
* using usb_unlink_urb(). |
* |
* Data Transfer Buffers: |
* |
* Normally drivers provide I/O buffers allocated with kmalloc() or otherwise |
* taken from the general page pool. That is provided by transfer_buffer |
* (control requests also use setup_packet), and host controller drivers |
* perform a dma mapping (and unmapping) for each buffer transferred. Those |
* mapping operations can be expensive on some platforms (perhaps using a dma |
* bounce buffer or talking to an IOMMU), |
* although they're cheap on commodity x86 and ppc hardware. |
* |
* Alternatively, drivers may pass the URB_NO_xxx_DMA_MAP transfer flags, |
* which tell the host controller driver that no such mapping is needed since |
* the device driver is DMA-aware. For example, a device driver might |
* allocate a DMA buffer with usb_buffer_alloc() or call usb_buffer_map(). |
* When these transfer flags are provided, host controller drivers will |
* attempt to use the dma addresses found in the transfer_dma and/or |
* setup_dma fields rather than determining a dma address themselves. (Note |
* that transfer_buffer and setup_packet must still be set because not all |
* host controllers use DMA, nor do virtual root hubs). |
* |
* Initialization: |
* |
* All URBs submitted must initialize dev, pipe, |
* transfer_flags (may be zero), complete, timeout (may be zero). |
* The URB_ASYNC_UNLINK transfer flag affects later invocations of |
* the usb_unlink_urb() routine. |
* |
* All URBs must also initialize |
* transfer_buffer and transfer_buffer_length. They may provide the |
* URB_SHORT_NOT_OK transfer flag, indicating that short reads are |
* to be treated as errors; that flag is invalid for write requests. |
* |
* Bulk URBs may |
* use the URB_ZERO_PACKET transfer flag, indicating that bulk OUT transfers |
* should always terminate with a short packet, even if it means adding an |
* extra zero length packet. |
* |
* Control URBs must provide a setup_packet. The setup_packet and |
* transfer_buffer may each be mapped for DMA or not, independently of |
* the other. The transfer_flags bits URB_NO_TRANSFER_DMA_MAP and |
* URB_NO_SETUP_DMA_MAP indicate which buffers have already been mapped. |
* URB_NO_SETUP_DMA_MAP is ignored for non-control URBs. |
* |
 * Interrupt URBs must provide an interval, saying how often (in milliseconds
* or, for highspeed devices, 125 microsecond units) |
* to poll for transfers. After the URB has been submitted, the interval |
* and start_frame fields reflect how the transfer was actually scheduled. |
* The polling interval may be more frequent than requested. |
 * For example, some controllers have a maximum interval of 32 milliseconds,
 * while others support intervals of up to 1024 milliseconds.
* Isochronous URBs also have transfer intervals. (Note that for isochronous |
* endpoints, as well as high speed interrupt endpoints, the encoding of |
* the transfer interval in the endpoint descriptor is logarithmic.) |
* |
* Isochronous URBs normally use the URB_ISO_ASAP transfer flag, telling |
* the host controller to schedule the transfer as soon as bandwidth |
* utilization allows, and then set start_frame to reflect the actual frame |
* selected during submission. Otherwise drivers must specify the start_frame |
* and handle the case where the transfer can't begin then. However, drivers |
* won't know how bandwidth is currently allocated, and while they can |
* find the current frame using usb_get_current_frame_number () they can't |
* know the range for that frame number. (Ranges for frame counter values |
* are HC-specific, and can go from 256 to 65536 frames from "now".) |
* |
* Isochronous URBs have a different data transfer model, in part because |
* the quality of service is only "best effort". Callers provide specially |
* allocated URBs, with number_of_packets worth of iso_frame_desc structures |
* at the end. Each such packet is an individual ISO transfer. Isochronous |
* URBs are normally queued, submitted by drivers to arrange that |
* transfers are at least double buffered, and then explicitly resubmitted |
* in completion handlers, so |
* that data (such as audio or video) streams at as constant a rate as the |
* host controller scheduler can support. |
* |
* Completion Callbacks: |
* |
* The completion callback is made in_interrupt(), and one of the first |
* things that a completion handler should do is check the status field. |
* The status field is provided for all URBs. It is used to report |
* unlinked URBs, and status for all non-ISO transfers. It should not |
* be examined before the URB is returned to the completion handler. |
* |
* The context field is normally used to link URBs back to the relevant |
* driver or request state. |
* |
* When completion callback is invoked for non-isochronous URBs, the |
* actual_length field tells how many bytes were transferred. |
* |
* ISO transfer status is reported in the status and actual_length fields |
* of the iso_frame_desc array, and the number of errors is reported in |
* error_count. Completion callbacks for ISO transfers will normally |
* (re)submit URBs to ensure a constant transfer rate. |
*/ |
/*
 * struct urb - USB Request Block, the unit of asynchronous I/O between a
 * driver and the USB core / host controller.  See the long comment above
 * for the data-transfer model and completion-callback rules.
 */
struct urb
{
	/* private, usb core and host controller only fields in the urb */
	spinlock_t lock;		/* lock for the URB */
	atomic_t count;			/* reference count of the URB */
	void *hcpriv;			/* private data for host controller */
	struct list_head urb_list;	/* list pointer to all active urbs */
	int bandwidth;			/* bandwidth for INT/ISO request */
	/* public, documented fields in the urb that can be used by drivers */
	struct usb_device *dev;		/* (in) pointer to associated device */
	unsigned int pipe;		/* (in) pipe information */
	int status;			/* (return) non-ISO status */
	unsigned int transfer_flags;	/* (in) URB_SHORT_NOT_OK | ...*/
	void *transfer_buffer;		/* (in) associated data buffer */
	dma_addr_t transfer_dma;	/* (in) dma addr for transfer_buffer */
	int transfer_buffer_length;	/* (in) data buffer length */
	int actual_length;		/* (return) actual transfer length */
	unsigned char *setup_packet;	/* (in) setup packet (control only) */
	dma_addr_t setup_dma;		/* (in) dma addr for setup_packet */
	int start_frame;		/* (modify) start frame (INT/ISO) */
	int number_of_packets;		/* (in) number of ISO packets */
	int interval;			/* (in) transfer interval (INT/ISO) */
	int error_count;		/* (return) number of ISO errors */
	int timeout;			/* (in) timeout, in jiffies */
	void *context;			/* (in) context for completion */
	usb_complete_t complete;	/* (in) completion routine */
	/* zero-length trailing array: number_of_packets descriptors are
	 * allocated immediately after the urb (see usb_alloc_urb()). */
	struct usb_iso_packet_descriptor iso_frame_desc[0];	/* (in) ISO ONLY */
};
/* -------------------------------------------------------------------------- */ |
/** |
* usb_fill_control_urb - initializes a control urb |
* @urb: pointer to the urb to initialize. |
* @dev: pointer to the struct usb_device for this urb. |
* @pipe: the endpoint pipe |
* @setup_packet: pointer to the setup_packet buffer |
* @transfer_buffer: pointer to the transfer buffer |
* @buffer_length: length of the transfer buffer |
* @complete: pointer to the usb_complete_t function |
* @context: what to set the urb context to. |
* |
* Initializes a control urb with the proper information needed to submit |
* it to a device. |
*/ |
static inline void usb_fill_control_urb (struct urb *urb, |
struct usb_device *dev, |
unsigned int pipe, |
unsigned char *setup_packet, |
void *transfer_buffer, |
int buffer_length, |
usb_complete_t complete, |
void *context) |
{ |
spin_lock_init(&urb->lock); |
urb->dev = dev; |
urb->pipe = pipe; |
urb->setup_packet = setup_packet; |
urb->transfer_buffer = transfer_buffer; |
urb->transfer_buffer_length = buffer_length; |
urb->complete = complete; |
urb->context = context; |
} |
/** |
* usb_fill_bulk_urb - macro to help initialize a bulk urb |
* @urb: pointer to the urb to initialize. |
* @dev: pointer to the struct usb_device for this urb. |
* @pipe: the endpoint pipe |
* @transfer_buffer: pointer to the transfer buffer |
* @buffer_length: length of the transfer buffer |
* @complete: pointer to the usb_complete_t function |
* @context: what to set the urb context to. |
* |
* Initializes a bulk urb with the proper information needed to submit it |
* to a device. |
*/ |
static inline void usb_fill_bulk_urb (struct urb *urb, |
struct usb_device *dev, |
unsigned int pipe, |
void *transfer_buffer, |
int buffer_length, |
usb_complete_t complete, |
void *context) |
{ |
spin_lock_init(&urb->lock); |
urb->dev = dev; |
urb->pipe = pipe; |
urb->transfer_buffer = transfer_buffer; |
urb->transfer_buffer_length = buffer_length; |
urb->complete = complete; |
urb->context = context; |
} |
/** |
* usb_fill_int_urb - macro to help initialize a interrupt urb |
* @urb: pointer to the urb to initialize. |
* @dev: pointer to the struct usb_device for this urb. |
* @pipe: the endpoint pipe |
* @transfer_buffer: pointer to the transfer buffer |
* @buffer_length: length of the transfer buffer |
* @complete: pointer to the usb_complete_t function |
* @context: what to set the urb context to. |
* @interval: what to set the urb interval to, encoded like |
* the endpoint descriptor's bInterval value. |
* |
* Initializes a interrupt urb with the proper information needed to submit |
* it to a device. |
* Note that high speed interrupt endpoints use a logarithmic encoding of |
* the endpoint interval, and express polling intervals in microframes |
* (eight per millisecond) rather than in frames (one per millisecond). |
*/ |
static inline void usb_fill_int_urb (struct urb *urb, |
struct usb_device *dev, |
unsigned int pipe, |
void *transfer_buffer, |
int buffer_length, |
usb_complete_t complete, |
void *context, |
int interval) |
{ |
spin_lock_init(&urb->lock); |
urb->dev = dev; |
urb->pipe = pipe; |
urb->transfer_buffer = transfer_buffer; |
urb->transfer_buffer_length = buffer_length; |
urb->complete = complete; |
urb->context = context; |
if (dev->speed == USB_SPEED_HIGH) |
urb->interval = 1 << (interval - 1); |
else |
urb->interval = interval; |
urb->start_frame = -1; |
} |
extern void usb_init_urb(struct urb *urb); |
extern struct urb *usb_alloc_urb(int iso_packets, int mem_flags); |
extern void usb_free_urb(struct urb *urb); |
#define usb_put_urb usb_free_urb |
extern struct urb *usb_get_urb(struct urb *urb); |
extern int usb_submit_urb(struct urb *urb, int mem_flags); |
extern int usb_unlink_urb(struct urb *urb); |
#define HAVE_USB_BUFFERS |
void *usb_buffer_alloc (struct usb_device *dev, size_t size, |
int mem_flags, dma_addr_t *dma); |
void usb_buffer_free (struct usb_device *dev, size_t size, |
void *addr, dma_addr_t dma); |
struct urb *usb_buffer_map (struct urb *urb); |
void usb_buffer_dmasync (struct urb *urb); |
void usb_buffer_unmap (struct urb *urb); |
struct scatterlist; |
int usb_buffer_map_sg (struct usb_device *dev, unsigned pipe, |
struct scatterlist *sg, int nents); |
void usb_buffer_dmasync_sg (struct usb_device *dev, unsigned pipe, |
struct scatterlist *sg, int n_hw_ents); |
void usb_buffer_unmap_sg (struct usb_device *dev, unsigned pipe, |
struct scatterlist *sg, int n_hw_ents); |
/*-------------------------------------------------------------------* |
* SYNCHRONOUS CALL SUPPORT * |
*-------------------------------------------------------------------*/ |
extern int usb_control_msg(struct usb_device *dev, unsigned int pipe, |
__u8 request, __u8 requesttype, __u16 value, __u16 index, |
void *data, __u16 size, int timeout); |
extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, |
void *data, int len, int *actual_length, |
int timeout); |
/* wrappers around usb_control_msg() for the most common standard requests */ |
extern int usb_get_descriptor(struct usb_device *dev, unsigned char desctype, |
unsigned char descindex, void *buf, int size); |
extern int usb_get_device_descriptor(struct usb_device *dev); |
extern int usb_get_status(struct usb_device *dev, |
int type, int target, void *data); |
extern int usb_get_string(struct usb_device *dev, |
unsigned short langid, unsigned char index, void *buf, int size); |
extern int usb_string(struct usb_device *dev, int index, |
char *buf, size_t size); |
/* wrappers that also update important state inside usbcore */ |
extern int usb_clear_halt(struct usb_device *dev, int pipe); |
extern int usb_reset_configuration(struct usb_device *dev); |
extern int usb_set_configuration(struct usb_device *dev, int configuration); |
extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate); |
/* |
* timeouts, in seconds, used for sending/receiving control messages |
* they typically complete within a few frames (msec) after they're issued |
* USB identifies 5 second timeouts, maybe more in a few cases, and a few |
* slow devices (like some MGE Ellipse UPSes) actually push that limit. |
*/ |
#define USB_CTRL_GET_TIMEOUT 5 |
#define USB_CTRL_SET_TIMEOUT 5 |
/** |
* struct usb_sg_request - support for scatter/gather I/O |
* @status: zero indicates success, else negative errno |
* @bytes: counts bytes transferred. |
* |
* These requests are initialized using usb_sg_init(), and then are used |
* as request handles passed to usb_sg_wait() or usb_sg_cancel(). Most |
* members of the request object aren't for driver access. |
* |
* The status and bytecount values are valid only after usb_sg_wait() |
* returns. If the status is zero, then the bytecount matches the total |
* from the request. |
* |
* After an error completion, drivers may need to clear a halt condition |
* on the endpoint. |
*/ |
struct usb_sg_request {
	int status;			/* zero on success, else negative errno
					 * (valid only after usb_sg_wait()) */
	size_t bytes;			/* total bytes transferred */

	/*
	 * members below are private to usbcore,
	 * and are not provided for driver access!
	 */
	spinlock_t lock;		/* internal lock */
	struct usb_device *dev;		/* device the request targets */
	int pipe;			/* endpoint pipe used for the urbs */
	struct scatterlist *sg;		/* caller's scatterlist */
	int nents;			/* number of scatterlist entries */
	int entries;			/* presumably the number of urbs built
					 * from sg -- verify in usb_sg_init() */
	struct urb **urbs;		/* per-entry urbs */
	int count;			/* internal counter */
	struct completion complete;	/* signalled when the request finishes */
};
int usb_sg_init ( |
struct usb_sg_request *io, |
struct usb_device *dev, |
unsigned pipe, |
unsigned period, |
struct scatterlist *sg, |
int nents, |
size_t length, |
int mem_flags |
); |
void usb_sg_cancel (struct usb_sg_request *io); |
void usb_sg_wait (struct usb_sg_request *io); |
/* -------------------------------------------------------------------------- */ |
/* |
* Calling this entity a "pipe" is glorifying it. A USB pipe |
* is something embarrassingly simple: it basically consists |
* of the following information: |
* - device number (7 bits) |
* - endpoint number (4 bits) |
* - current Data0/1 state (1 bit) [Historical; now gone] |
* - direction (1 bit) |
* - speed (1 bit) [Historical and specific to USB 1.1; now gone.] |
* - max packet size (2 bits: 8, 16, 32 or 64) [Historical; now gone.] |
* - pipe type (2 bits: control, interrupt, bulk, isochronous) |
* |
* That's 18 bits. Really. Nothing more. And the USB people have |
* documented these eighteen bits as some kind of glorious |
* virtual data structure. |
* |
* Let's not fall in that trap. We'll just encode it as a simple |
* unsigned int. The encoding is: |
* |
* - max size: bits 0-1 [Historical; now gone.] |
* - direction: bit 7 (0 = Host-to-Device [Out], |
* 1 = Device-to-Host [In] ... |
* like endpoint bEndpointAddress) |
* - device: bits 8-14 ... bit positions known to uhci-hcd |
* - endpoint: bits 15-18 ... bit positions known to uhci-hcd |
* - Data0/1: bit 19 [Historical; now gone. ] |
* - lowspeed: bit 26 [Historical; now gone. ] |
* - pipe type: bits 30-31 (00 = isochronous, 01 = interrupt, |
* 10 = control, 11 = bulk) |
* |
* Why? Because it's arbitrary, and whatever encoding we select is really |
* up to us. This one happens to share a lot of bit positions with the UHCI |
* specification, so that much of the uhci driver can just mask the bits |
* appropriately. |
*/ |
/* NOTE: these are not the standard USB_ENDPOINT_XFER_* values!! */ |
#define PIPE_ISOCHRONOUS 0 |
#define PIPE_INTERRUPT 1 |
#define PIPE_CONTROL 2 |
#define PIPE_BULK 3 |
#define usb_maxpacket(dev, pipe, out) (out \ |
? (dev)->epmaxpacketout[usb_pipeendpoint(pipe)] \ |
: (dev)->epmaxpacketin [usb_pipeendpoint(pipe)] ) |
#define usb_pipein(pipe) ((pipe) & USB_DIR_IN) |
#define usb_pipeout(pipe) (!usb_pipein(pipe)) |
#define usb_pipedevice(pipe) (((pipe) >> 8) & 0x7f) |
#define usb_pipeendpoint(pipe) (((pipe) >> 15) & 0xf) |
#define usb_pipetype(pipe) (((pipe) >> 30) & 3) |
#define usb_pipeisoc(pipe) (usb_pipetype((pipe)) == PIPE_ISOCHRONOUS) |
#define usb_pipeint(pipe) (usb_pipetype((pipe)) == PIPE_INTERRUPT) |
#define usb_pipecontrol(pipe) (usb_pipetype((pipe)) == PIPE_CONTROL) |
#define usb_pipebulk(pipe) (usb_pipetype((pipe)) == PIPE_BULK) |
/* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */ |
#define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1) |
#define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep))) |
#define usb_settoggle(dev, ep, out, bit) ((dev)->toggle[out] = ((dev)->toggle[out] & ~(1 << (ep))) | ((bit) << (ep))) |
/* Endpoint halt control/status ... likewise USE WITH CAUTION */ |
#define usb_endpoint_running(dev, ep, out) ((dev)->halted[out] &= ~(1 << (ep))) |
#define usb_endpoint_halted(dev, ep, out) ((dev)->halted[out] & (1 << (ep))) |
static inline unsigned int __create_pipe(struct usb_device *dev, unsigned int endpoint) |
{ |
return (dev->devnum << 8) | (endpoint << 15); |
} |
/* Create various pipes... */ |
#define usb_sndctrlpipe(dev,endpoint) ((PIPE_CONTROL << 30) | __create_pipe(dev,endpoint)) |
#define usb_rcvctrlpipe(dev,endpoint) ((PIPE_CONTROL << 30) | __create_pipe(dev,endpoint) | USB_DIR_IN) |
#define usb_sndisocpipe(dev,endpoint) ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev,endpoint)) |
#define usb_rcvisocpipe(dev,endpoint) ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev,endpoint) | USB_DIR_IN) |
#define usb_sndbulkpipe(dev,endpoint) ((PIPE_BULK << 30) | __create_pipe(dev,endpoint)) |
#define usb_rcvbulkpipe(dev,endpoint) ((PIPE_BULK << 30) | __create_pipe(dev,endpoint) | USB_DIR_IN) |
#define usb_sndintpipe(dev,endpoint) ((PIPE_INTERRUPT << 30) | __create_pipe(dev,endpoint)) |
#define usb_rcvintpipe(dev,endpoint) ((PIPE_INTERRUPT << 30) | __create_pipe(dev,endpoint) | USB_DIR_IN) |
/* -------------------------------------------------------------------------- */ |
/* |
* Debugging and troubleshooting/diagnostic helpers. |
*/ |
void usb_show_device_descriptor(struct usb_device_descriptor *); |
void usb_show_config_descriptor(struct usb_config_descriptor *); |
void usb_show_interface_descriptor(struct usb_interface_descriptor *); |
void usb_show_endpoint_descriptor(struct usb_endpoint_descriptor *); |
void usb_show_device(struct usb_device *); |
void usb_show_string(struct usb_device *dev, char *id, int index); |
#ifdef DEBUG |
#define dbg(format, arg...) printk(KERN_DEBUG "%s: " format "\n" , __FILE__ , ## arg) |
#else |
#define dbg(format, arg...) do {} while (0) |
#endif |
#ifdef DEBUG |
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n" , __FILE__ , ## arg) |
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n" , __FILE__ , ## arg) |
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n" , __FILE__ , ## arg) |
#else |
#define err(format, arg...) |
#define info(format, arg...) |
#define warn(format, arg...) |
#endif |
#endif /* __KERNEL__ */ |
#endif |
/shark/trunk/drivers/linuxc26/include/linux/wait.h |
---|
1,243 → 1,246 |
#ifndef _LINUX_WAIT_H |
#define _LINUX_WAIT_H |
#define WNOHANG 0x00000001 |
#define WUNTRACED 0x00000002 |
#define __WNOTHREAD 0x20000000 /* Don't wait on children of other threads in this group */ |
#define __WALL 0x40000000 /* Wait on all children, regardless of type */ |
#define __WCLONE 0x80000000 /* Wait only on non-SIGCHLD children */ |
#ifdef __KERNEL__ |
#include <linux/config.h> |
#include <linux/list.h> |
#include <linux/stddef.h> |
#include <linux/spinlock.h> |
#include <asm/system.h> |
typedef struct __wait_queue wait_queue_t; |
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync); |
extern int default_wake_function(wait_queue_t *wait, unsigned mode, int sync); |
/* One entry on a wait queue: the sleeping task plus the callback used to
 * wake it up. */
struct __wait_queue {
	unsigned int flags;		/* WQ_FLAG_* bits */
#define WQ_FLAG_EXCLUSIVE	0x01	/* entry is an exclusive (wake-one) waiter */
	struct task_struct * task;	/* task to wake */
	wait_queue_func_t func;		/* wakeup callback, e.g. default_wake_function */
	struct list_head task_list;	/* link on wait_queue_head_t.task_list */
};
/* Head of a wait queue: a spinlock protecting the list of waiters.
 * (The *_locked helpers below document that callers must hold @lock.) */
struct __wait_queue_head {
	spinlock_t lock;		/* protects task_list */
	struct list_head task_list;	/* list of __wait_queue entries */
};
typedef struct __wait_queue_head wait_queue_head_t;
/* |
 * Macros for declaration and initialisation of the datatypes
*/ |
#define __WAITQUEUE_INITIALIZER(name, tsk) { \ |
.task = tsk, \ |
.func = default_wake_function, \ |
.task_list = { NULL, NULL } } |
#define DECLARE_WAITQUEUE(name, tsk) \ |
wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk) |
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ |
.lock = SPIN_LOCK_UNLOCKED, \ |
.task_list = { &(name).task_list, &(name).task_list } } |
#define DECLARE_WAIT_QUEUE_HEAD(name) \ |
wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name) |
extern void init_waitqueue_head(wait_queue_head_t *q); |
extern void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p); |
extern void init_waitqueue_func_entry(wait_queue_t *q, |
wait_queue_func_t func); |
extern int waitqueue_active(wait_queue_head_t *q); |
extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)); |
extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)); |
extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)); |
extern void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new); |
/* |
* Used for wake-one threads: |
*/ |
/* Append @new at the tail of @head's waiter list.  Per the comment above,
 * this placement is used for wake-one (exclusive) waiters.  Callers are
 * expected to hold head->lock (see add_wait_queue_exclusive_locked()). */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
						wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}
/* Unlink @old from whatever waiter list it is on.  @head is unused here
 * and kept only for API symmetry with __add_wait_queue*().  Callers are
 * expected to hold the head's lock (see remove_wait_queue_locked()). */
static inline void __remove_wait_queue(wait_queue_head_t *head,
							wait_queue_t *old)
{
	list_del(&old->task_list);
}
extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr)); |
extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode)); |
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr)); |
#define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1) |
#define wake_up_nr(x, nr) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr) |
#define wake_up_all(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0) |
#define wake_up_all_sync(x) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0) |
#define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE, 1) |
#define wake_up_interruptible_nr(x, nr) __wake_up((x),TASK_INTERRUPTIBLE, nr) |
#define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE, 0) |
#define wake_up_locked(x) __wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE) |
#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1) |
/*
 * Sleep on @wq in TASK_UNINTERRUPTIBLE state until @condition is true.
 * @condition is re-evaluated after every wakeup; the temporary entry is
 * removed before returning.  Internal helper: use wait_event(), which
 * skips the queueing entirely when the condition already holds.
 */
#define __wait_event(wq, condition) 					\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_UNINTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		schedule();						\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)
/*
 * Block (uninterruptibly) until @condition becomes true.  Tests the
 * condition first so the common already-true case never queues a waiter.
 */
#define wait_event(wq, condition) 					\
do {									\
	if (condition)	 						\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
/*
 * Like __wait_event(), but sleeps in TASK_INTERRUPTIBLE state: if a
 * signal becomes pending, stops waiting and stores -ERESTARTSYS into
 * @ret (an lvalue).  Internal helper for wait_event_interruptible().
 */
#define __wait_event_interruptible(wq, condition, ret)			\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)
/*
 * Interruptible wait; evaluates to 0 once @condition is true, or to
 * -ERESTARTSYS if interrupted by a signal.  Skips queueing when the
 * condition already holds.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible(wq, condition, __ret);	\
	__ret;								\
})
/*
 * Like __wait_event_interruptible(), with a deadline: @ret holds the
 * timeout in jiffies on entry and is updated with schedule_timeout()'s
 * remaining time; reaching 0 ends the wait.  A pending signal stores
 * -ERESTARTSYS into @ret instead.
 */
#define __wait_event_interruptible_timeout(wq, condition, ret)		\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			ret = schedule_timeout(ret);			\
			if (!ret)					\
				break;					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)
/*
 * Interruptible wait with a @timeout in jiffies.  Evaluates to the
 * remaining jiffies (>= 0) when the condition became true or the timeout
 * elapsed, or to -ERESTARTSYS if interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})
/* |
* Must be called with the spinlock in the wait_queue_head_t held. |
*/ |
/* Queue @wait on @q as an exclusive (wake-one) waiter.  As noted in the
 * comment above, the caller must hold q->lock. */
static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
						   wait_queue_t * wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;	/* mark as wake-one */
	__add_wait_queue_tail(q,  wait);	/* exclusive waiters go at the tail */
}
/* |
* Must be called with the spinlock in the wait_queue_head_t held. |
*/ |
/* Remove @wait from @q.  As noted in the comment above, the caller must
 * hold q->lock (contrast with remove_wait_queue(), which locks itself). */
static inline void remove_wait_queue_locked(wait_queue_head_t *q,
					    wait_queue_t * wait)
{
	__remove_wait_queue(q,  wait);
}
/* |
* These are the old interfaces to sleep waiting for an event. |
* They are racy. DO NOT use them, use the wait_event* interfaces above. |
* We plan to remove these interfaces during 2.7. |
*/ |
extern void FASTCALL(sleep_on(wait_queue_head_t *q)); |
extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q, |
signed long timeout)); |
extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q)); |
extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q, |
signed long timeout)); |
/* |
* Waitqueues which are removed from the waitqueue_head at wakeup time |
*/ |
void FASTCALL(prepare_to_wait(wait_queue_head_t *q, |
wait_queue_t *wait, int state)); |
void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q, |
wait_queue_t *wait, int state)); |
void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait)); |
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync); |
#define DEFINE_WAIT(name) \ |
wait_queue_t name = { \ |
.task = current, \ |
.func = autoremove_wake_function, \ |
.task_list = { .next = &name.task_list, \ |
.prev = &name.task_list, \ |
}, \ |
} |
#define init_wait(wait) \ |
do { \ |
wait->task = current; \ |
wait->func = autoremove_wake_function; \ |
INIT_LIST_HEAD(&wait->task_list); \ |
} while (0) |
#endif /* __KERNEL__ */ |
#endif |
#ifndef _LINUX_WAIT_H |
#define _LINUX_WAIT_H |
#define WNOHANG 0x00000001 |
#define WUNTRACED 0x00000002 |
#define __WNOTHREAD 0x20000000 /* Don't wait on children of other threads in this group */ |
#define __WALL 0x40000000 /* Wait on all children, regardless of type */ |
#define __WCLONE 0x80000000 /* Wait only on non-SIGCHLD children */ |
#ifdef __KERNEL__ |
#include <linux/config.h> |
#include <linux/list.h> |
#include <linux/stddef.h> |
#include <linux/spinlock.h> |
#include <asm/system.h> |
typedef struct __wait_queue wait_queue_t; |
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync); |
extern int default_wake_function(wait_queue_t *wait, unsigned mode, int sync); |
struct __wait_queue { |
unsigned int flags; |
#define WQ_FLAG_EXCLUSIVE 0x01 |
struct task_struct * task; |
wait_queue_func_t func; |
struct list_head task_list; |
}; |
struct __wait_queue_head { |
spinlock_t lock; |
struct list_head task_list; |
}; |
typedef struct __wait_queue_head wait_queue_head_t; |
/* |
 * Macros for declaration and initialisation of the datatypes
*/ |
#define __WAITQUEUE_INITIALIZER(name, tsk) { \ |
.task = tsk, \ |
.func = default_wake_function, \ |
.task_list = { NULL, NULL } } |
#define DECLARE_WAITQUEUE(name, tsk) \ |
wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk) |
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ |
.lock = SPIN_LOCK_UNLOCKED, \ |
.task_list = { &(name).task_list, &(name).task_list } } |
#define DECLARE_WAIT_QUEUE_HEAD(name) \ |
wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name) |
extern void init_waitqueue_head(wait_queue_head_t *q); |
extern void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p); |
extern void init_waitqueue_func_entry(wait_queue_t *q, |
wait_queue_func_t func); |
extern int waitqueue_active(wait_queue_head_t *q); |
extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)); |
extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)); |
extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)); |
extern void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new); |
/* |
* Used for wake-one threads: |
*/ |
/* Append @new at the tail of @head's waiter list.  Per the comment above,
 * this placement is used for wake-one (exclusive) waiters.  Callers are
 * expected to hold head->lock (see add_wait_queue_exclusive_locked()). */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
						wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}
extern void free(void *ptr); |
/* Unlink @old from whatever waiter list it is on; @head is unused and
 * kept for API symmetry with __add_wait_queue*(). */
static inline void __remove_wait_queue(wait_queue_head_t *head,
							wait_queue_t *old)
{
	/* SHARK port also releases the task pointer when the entry leaves
	 * the queue.  NOTE(review): in the stock macros below (DEFINE_WAIT,
	 * __WAITQUEUE_INITIALIZER) .task is `current`, not a heap object --
	 * this free() assumes the SHARK port always heap-allocates the task
	 * stored here; confirm it cannot free a live task or double-free. */
	free(old->task); //** SHARK
	list_del(&old->task_list);
}
extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr)); |
extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode)); |
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr)); |
#define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1) |
#define wake_up_nr(x, nr) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr) |
#define wake_up_all(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0) |
#define wake_up_all_sync(x) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0) |
#define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE, 1) |
#define wake_up_interruptible_nr(x, nr) __wake_up((x),TASK_INTERRUPTIBLE, nr) |
#define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE, 0) |
#define wake_up_locked(x) __wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE) |
#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1) |
#define __wait_event(wq, condition) \ |
do { \ |
wait_queue_t __wait; \ |
init_waitqueue_entry(&__wait, current); \ |
\ |
add_wait_queue(&wq, &__wait); \ |
for (;;) { \ |
set_current_state(TASK_UNINTERRUPTIBLE); \ |
if (condition) \ |
break; \ |
schedule(); \ |
} \ |
current->state = TASK_RUNNING; \ |
remove_wait_queue(&wq, &__wait); \ |
} while (0) |
#define wait_event(wq, condition) \ |
do { \ |
if (condition) \ |
break; \ |
__wait_event(wq, condition); \ |
} while (0) |
#define __wait_event_interruptible(wq, condition, ret) \ |
do { \ |
wait_queue_t __wait; \ |
init_waitqueue_entry(&__wait, current); \ |
\ |
add_wait_queue(&wq, &__wait); \ |
for (;;) { \ |
set_current_state(TASK_INTERRUPTIBLE); \ |
if (condition) \ |
break; \ |
if (!signal_pending(current)) { \ |
schedule(); \ |
continue; \ |
} \ |
ret = -ERESTARTSYS; \ |
break; \ |
} \ |
current->state = TASK_RUNNING; \ |
remove_wait_queue(&wq, &__wait); \ |
} while (0) |
#define wait_event_interruptible(wq, condition) \ |
({ \ |
int __ret = 0; \ |
if (!(condition)) \ |
__wait_event_interruptible(wq, condition, __ret); \ |
__ret; \ |
}) |
#define __wait_event_interruptible_timeout(wq, condition, ret) \ |
do { \ |
wait_queue_t __wait; \ |
init_waitqueue_entry(&__wait, current); \ |
\ |
add_wait_queue(&wq, &__wait); \ |
for (;;) { \ |
set_current_state(TASK_INTERRUPTIBLE); \ |
if (condition) \ |
break; \ |
if (!signal_pending(current)) { \ |
ret = schedule_timeout(ret); \ |
if (!ret) \ |
break; \ |
continue; \ |
} \ |
ret = -ERESTARTSYS; \ |
break; \ |
} \ |
current->state = TASK_RUNNING; \ |
remove_wait_queue(&wq, &__wait); \ |
} while (0) |
#define wait_event_interruptible_timeout(wq, condition, timeout) \ |
({ \ |
long __ret = timeout; \ |
if (!(condition)) \ |
__wait_event_interruptible_timeout(wq, condition, __ret); \ |
__ret; \ |
}) |
/* |
* Must be called with the spinlock in the wait_queue_head_t held. |
*/ |
/* Queue @wait on @q as an exclusive (wake-one) waiter.  As noted in the
 * comment above, the caller must hold q->lock. */
static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
						   wait_queue_t * wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;	/* mark as wake-one */
	__add_wait_queue_tail(q,  wait);	/* exclusive waiters go at the tail */
}
/* |
* Must be called with the spinlock in the wait_queue_head_t held. |
*/ |
/* Remove @wait from @q.  As noted in the comment above, the caller must
 * hold q->lock (contrast with remove_wait_queue(), which locks itself). */
static inline void remove_wait_queue_locked(wait_queue_head_t *q,
					    wait_queue_t * wait)
{
	__remove_wait_queue(q,  wait);
}
/* |
* These are the old interfaces to sleep waiting for an event. |
* They are racy. DO NOT use them, use the wait_event* interfaces above. |
* We plan to remove these interfaces during 2.7. |
*/ |
extern void FASTCALL(sleep_on(wait_queue_head_t *q)); |
extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q, |
signed long timeout)); |
extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q)); |
extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q, |
signed long timeout)); |
/* |
* Waitqueues which are removed from the waitqueue_head at wakeup time |
*/ |
void FASTCALL(prepare_to_wait(wait_queue_head_t *q, |
wait_queue_t *wait, int state)); |
void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q, |
wait_queue_t *wait, int state)); |
void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait)); |
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync); |
#define DEFINE_WAIT(name) \ |
wait_queue_t name = { \ |
.task = current, \ |
.func = autoremove_wake_function, \ |
.task_list = { .next = &name.task_list, \ |
.prev = &name.task_list, \ |
}, \ |
} |
#define init_wait(wait) \ |
do { \ |
wait->task = current; \ |
wait->func = autoremove_wake_function; \ |
INIT_LIST_HEAD(&wait->task_list); \ |
} while (0) |
#endif /* __KERNEL__ */ |
#endif |
/shark/trunk/drivers/linuxc26/include/linux/sched.h |
---|
1,911 → 1,911 |
#ifndef _LINUX_SCHED_H |
#define _LINUX_SCHED_H |
#include <asm/param.h> /* for HZ */ |
#include <linux/config.h> |
#include <linux/capability.h> |
#include <linux/threads.h> |
#include <linux/kernel.h> |
#include <linux/types.h> |
#include <linux/timex.h> |
#include <linux/jiffies.h> |
#include <linux/rbtree.h> |
#include <linux/thread_info.h> |
#include <linux/cpumask.h> |
#include <asm/system.h> |
#include <asm/semaphore.h> |
#include <asm/page.h> |
#include <asm/ptrace.h> |
#include <asm/mmu.h> |
#include <linux/smp.h> |
#include <linux/sem.h> |
#include <linux/signal.h> |
#include <linux/securebits.h> |
#include <linux/fs_struct.h> |
#include <linux/compiler.h> |
#include <linux/completion.h> |
#include <linux/pid.h> |
#include <linux/percpu.h> |
struct exec_domain; |
/* |
* cloning flags: |
*/ |
#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */ |
#define CLONE_VM 0x00000100 /* set if VM shared between processes */ |
#define CLONE_FS 0x00000200 /* set if fs info shared between processes */ |
#define CLONE_FILES 0x00000400 /* set if open files shared between processes */ |
#define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked signals shared */ |
#define CLONE_IDLETASK 0x00001000 /* set if new pid should be 0 (kernel only)*/ |
#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */ |
#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */ |
#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */ |
#define CLONE_THREAD 0x00010000 /* Same thread group? */ |
#define CLONE_NEWNS 0x00020000 /* New namespace group? */ |
#define CLONE_SYSVSEM 0x00040000 /* share system V SEM_UNDO semantics */ |
#define CLONE_SETTLS 0x00080000 /* create a new TLS for the child */ |
#define CLONE_PARENT_SETTID 0x00100000 /* set the TID in the parent */ |
#define CLONE_CHILD_CLEARTID 0x00200000 /* clear the TID in the child */ |
#define CLONE_DETACHED 0x00400000 /* Not used - CLONE_THREAD implies detached uniquely */ |
#define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */ |
#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */ |
#define CLONE_STOPPED 0x02000000 /* Start in stopped state */ |
/* |
* List of flags we want to share for kernel threads, |
* if only because they are not used by them anyway. |
*/ |
#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND) |
/* |
* These are the constant used to fake the fixed-point load-average |
* counting. Some notes: |
* - 11 bit fractions expand to 22 bits by the multiplies: this gives |
* a load-average precision of 10 bits integer + 11 bits fractional |
* - if you want to count load-averages more often, you need more |
* precision, or rounding will get you. With 2-second counting freq, |
* the EXP_n values would be 1981, 2034 and 2043 if still using only |
* 11 bit fractions. |
*/ |
extern unsigned long avenrun[]; /* Load averages */ |
#define FSHIFT 11 /* nr of bits of precision */ |
#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ |
#define LOAD_FREQ (5*HZ) /* 5 sec intervals */ |
#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */ |
#define EXP_5 2014 /* 1/exp(5sec/5min) */ |
#define EXP_15 2037 /* 1/exp(5sec/15min) */ |
#define CALC_LOAD(load,exp,n) \ |
load *= exp; \ |
load += n*(FIXED_1-exp); \ |
load >>= FSHIFT; |
#define CT_TO_SECS(x) ((x) / HZ) |
#define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ) |
extern int nr_threads; |
extern int last_pid; |
DECLARE_PER_CPU(unsigned long, process_counts); |
extern int nr_processes(void); |
extern unsigned long nr_running(void); |
extern unsigned long nr_uninterruptible(void); |
extern unsigned long nr_iowait(void); |
#include <linux/time.h> |
#include <linux/param.h> |
#include <linux/resource.h> |
#include <linux/timer.h> |
#include <asm/processor.h> |
#define TASK_RUNNING 0 |
#define TASK_INTERRUPTIBLE 1 |
#define TASK_UNINTERRUPTIBLE 2 |
#define TASK_STOPPED 4 |
#define TASK_ZOMBIE 8 |
#define TASK_DEAD 16 |
#define __set_task_state(tsk, state_value) \ |
do { (tsk)->state = (state_value); } while (0) |
#define set_task_state(tsk, state_value) \ |
set_mb((tsk)->state, (state_value)) |
#define __set_current_state(state_value) \ |
do { current->state = (state_value); } while (0) |
#define set_current_state(state_value) \ |
set_mb(current->state, (state_value)) |
/* |
* Scheduling policies |
*/ |
#define SCHED_NORMAL 0 |
#define SCHED_FIFO 1 |
#define SCHED_RR 2 |
struct sched_param { |
int sched_priority; |
}; |
#ifdef __KERNEL__ |
#include <linux/spinlock.h> |
/* |
* This serializes "schedule()" and also protects |
* the run-queue from deletions/modifications (but |
* _adding_ to the beginning of the run-queue has |
* a separate lock). |
*/ |
extern rwlock_t tasklist_lock; |
extern spinlock_t mmlist_lock; |
typedef struct task_struct task_t; |
extern void sched_init(void); |
extern void init_idle(task_t *idle, int cpu); |
extern void show_state(void); |
extern void show_regs(struct pt_regs *); |
/* |
* TASK is a pointer to the task whose backtrace we want to see (or NULL for current |
* task), SP is the stack pointer of the first frame that should be shown in the back |
* trace (or NULL if the entire call-chain of the task should be shown). |
*/ |
extern void show_stack(struct task_struct *task, unsigned long *sp); |
void io_schedule(void); |
long io_schedule_timeout(long timeout); |
extern void cpu_init (void); |
extern void trap_init(void); |
extern void update_process_times(int user); |
extern void update_one_process(struct task_struct *p, unsigned long user, |
unsigned long system, int cpu); |
extern void scheduler_tick(int user_tick, int system); |
extern unsigned long cache_decay_ticks; |
#define MAX_SCHEDULE_TIMEOUT LONG_MAX |
extern signed long FASTCALL(schedule_timeout(signed long timeout)); |
asmlinkage void schedule(void); |
struct namespace; |
/* Maximum number of active map areas.. This is a random (large) number */ |
#define MAX_MAP_COUNT (65536) |
#include <linux/aio.h> |
/*
 * Address-space descriptor, shared by all tasks cloned with CLONE_VM.
 * Reference counted via mm_users/mm_count; see mmgrab()/mmput()/mmdrop()
 * declared below.
 */
struct mm_struct {
	struct vm_area_struct * mmap;		/* list of VMAs */
	struct rb_root mm_rb;			/* same VMAs as a red-black tree (linux/rbtree.h), presumably for fast address lookup */
	struct vm_area_struct * mmap_cache;	/* last find_vma result */
	unsigned long free_area_cache;		/* first hole */
	pgd_t * pgd;				/* page global directory (top of the page-table hierarchy) */
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	int map_count;				/* number of VMAs */
	struct rw_semaphore mmap_sem;		/* NOTE(review): presumably serializes VMA-list changes -- confirm against mm/ callers */
	spinlock_t page_table_lock;		/* Protects task page tables and mm->rss */
	struct list_head mmlist;		/* List of all active mm's.  These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;
	unsigned long rss, total_vm, locked_vm;
	unsigned long def_flags;
	cpumask_t cpu_vm_mask;
	unsigned long swap_address;
	unsigned long saved_auxv[40]; /* for /proc/PID/auxv */
	unsigned dumpable:1;		/* NOTE(review): looks like a may-core-dump flag -- confirm semantics */
#ifdef CONFIG_HUGETLB_PAGE
	int used_hugetlb;
#endif
	/* Architecture-specific MM context */
	mm_context_t context;
	/* coredumping support */
	int core_waiters;
	struct completion *core_startup_done, core_done;
	/* aio bits */
	rwlock_t ioctx_list_lock;
	struct kioctx *ioctx_list;
	struct kioctx default_kioctx;
};
extern int mmlist_nr; |
/*
 * Signal-handler table, shared between tasks cloned with CLONE_SIGHAND
 * ("signal handlers and blocked signals shared" above).
 */
struct sighand_struct {
	atomic_t count;				/* reference count */
	struct k_sigaction action[_NSIG];	/* one disposition per signal */
	spinlock_t siglock;			/* guards this struct; held around dequeue_signal() (see dequeue_signal_lock() below) */
};
/* |
 * NOTE! "signal_struct" does not have its own
* locking, because a shared signal_struct always |
* implies a shared sighand_struct, so locking |
* sighand_struct is always a proper superset of |
* the locking of signal_struct. |
*/ |
/*
 * Shared signal state of a thread group.  Per the NOTE above it has no
 * lock of its own: the paired sighand_struct's siglock covers it.
 */
struct signal_struct {
	atomic_t count;			/* reference count */

	/* current thread group signal load-balancing target: */
	task_t			*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit;
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	struct task_struct	*group_exit_task;
	int			notify_count;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
};
/* |
* Priority of a process goes from 0..MAX_PRIO-1, valid RT |
* priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL tasks are |
* in the range MAX_RT_PRIO..MAX_PRIO-1. Priority values |
* are inverted: lower p->prio value means higher priority. |
* |
* The MAX_RT_USER_PRIO value allows the actual maximum |
* RT priority to be separate from the value exported to |
* user-space. This allows kernel threads to set their |
* priority to a value higher than any user task. Note: |
* MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. |
*/ |
#define MAX_USER_RT_PRIO 100 |
#define MAX_RT_PRIO MAX_USER_RT_PRIO |
#define MAX_PRIO (MAX_RT_PRIO + 40) |
#define rt_task(p) ((p)->prio < MAX_RT_PRIO) |
/* |
* Some day this will be a full-fledged user tracking system.. |
*/ |
/*
 * Per-UID accounting record ("per-UID process charging" -- see
 * alloc_uid()/free_uid() below).  Looked up via find_user() through the
 * uidhash list.
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */

	/* Hash table maintenance information */
	struct list_head uidhash_list;
	uid_t uid;		/* the UID this record charges against */
};
extern struct user_struct *find_user(uid_t); |
extern struct user_struct root_user; |
#define INIT_USER (&root_user) |
typedef struct prio_array prio_array_t; |
struct backing_dev_info; |
struct reclaim_state; |
/* POSIX.1b interval timer structure. */ |
/* POSIX.1b interval timer structure. */
/* One timer instance: identified by (it_clock, it_id), armed through the
 * embedded it_timer, and delivering a signal described by the it_sigev_*
 * fields to it_process. */
struct k_itimer {
	struct list_head list;		 /* free/ allocate list */
	spinlock_t it_lock;		 /* guards this timer instance */
	clockid_t it_clock;		 /* which timer type */
	timer_t it_id;			 /* timer id */
	int it_overrun;			 /* overrun on pending signal  */
	int it_overrun_last;		 /* overrun on last delivered signal */
	int it_requeue_pending;          /* waiting to requeue this timer */
	int it_sigev_notify;		 /* notify word of sigevent struct */
	int it_sigev_signo;		 /* signo word of sigevent struct */
	sigval_t it_sigev_value;	 /* value word of sigevent struct */
	unsigned long it_incr;		/* interval specified in jiffies */
	struct task_struct *it_process;	/* process to send signal to */
	struct timer_list it_timer;	/* the underlying kernel timer */
	struct sigqueue *sigq;		/* signal queue entry. */
};
struct io_context; /* See blkdev.h */ |
void exit_io_context(void); |
/*
 * The per-thread kernel task descriptor: scheduler state, identity
 * (pids, parents, thread group), timers, credentials, resource limits,
 * fs/files/ipc/signal state and misc bookkeeping.  (De-)allocation of
 * mm, files, fs and tty is protected by alloc_lock -- see task_lock()
 * below.
 */
struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	struct thread_info *thread_info;	/* arch low-level state; holds the TIF_* flags and (SMP) ->cpu */
	atomic_t usage;		/* reference count -- see get_task_struct()/put_task_struct() below */
	unsigned long flags;	/* per process flags, defined below */
	unsigned long ptrace;
	int lock_depth;		/* Lock depth */
	int prio, static_prio;	/* dynamic / base priority; lower value = higher priority (see MAX_PRIO comment above) */
	struct list_head run_list;
	prio_array_t *array;
	unsigned long sleep_avg;
	long interactive_credit;
	unsigned long long timestamp;
	int activated;
	unsigned long policy;	/* scheduling policy: SCHED_NORMAL/SCHED_FIFO/SCHED_RR */
	cpumask_t cpus_allowed;	/* CPU affinity mask -- see set_cpus_allowed() below */
	unsigned int time_slice, first_time_slice;
	struct list_head tasks;	/* global task-list linkage (see for_each_process/next_task below) */
	struct list_head ptrace_children;
	struct list_head ptrace_list;
	struct mm_struct *mm, *active_mm;	/* mm may be NULL (see get_task_mm() below) */
/* task state */
	struct linux_binfmt *binfmt;
	int exit_code, exit_signal;
	int pdeath_signal; /*  The signal sent when the parent dies  */
	/* ??? */
	unsigned long personality;
	int did_exec:1;
	pid_t pid;
	pid_t __pgrp; /* Accessed via process_group() */
	pid_t tty_old_pgrp;
	pid_t session;
	pid_t tgid;
	/* boolean value for session group leader */
	int leader;
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->parent->pid)
	 */
	struct task_struct *real_parent; /* real parent process (when being debugged) */
	struct task_struct *parent;	/* parent process */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */
	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	wait_queue_head_t wait_chldexit;	/* for wait4() */
	struct completion *vfork_done;	/* for vfork() */
	int __user *set_child_tid;	/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;	/* CLONE_CHILD_CLEARTID */
	unsigned long rt_priority;
	unsigned long it_real_value, it_prof_value, it_virt_value;	/* interval-timer state; updated against real_timer below */
	unsigned long it_real_incr, it_prof_incr, it_virt_incr;
	struct timer_list real_timer;
	struct list_head posix_timers; /* POSIX.1b Interval Timers */
	unsigned long utime, stime, cutime, cstime;	/* user/system times, own and children's */
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; /* context switch counts */
	u64 start_time;
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
/* process credentials */
	uid_t uid,euid,suid,fsuid;
	gid_t gid,egid,sgid,fsgid;
	int ngroups;
	gid_t groups[NGROUPS];	/* supplementary groups -- see in_group_p() below */
	kernel_cap_t cap_effective, cap_inheritable, cap_permitted;	/* capability sets -- see capable() below */
	int keep_capabilities:1;
	struct user_struct *user;	/* per-UID charging record */
/* limits */
	struct rlimit rlim[RLIM_NLIMITS];
	unsigned short used_math;
	char comm[16];	/* NOTE(review): presumably the command name -- confirm against exec/daemonize() callers */
/* file system info */
	int link_count, total_link_count;
	struct tty_struct *tty; /* NULL if no tty */
/* ipc stuff */
	struct sysv_sem sysvsem;
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespace */
	struct namespace *namespace;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;
	sigset_t blocked, real_blocked;
	struct sigpending pending;
	unsigned long sas_ss_sp;	/* alternate signal stack base -- see on_sig_stack()/sas_ss_flags() below */
	size_t sas_ss_size;		/* alternate signal stack size (0 = disabled) */
	int (*notifier)(void *priv);	/* see block_all_signals()/unblock_all_signals() below */
	void *notifier_data;
	sigset_t *notifier_mask;
	void *security;
	/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty */
	spinlock_t alloc_lock;
/* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
	spinlock_t proc_lock;
/* context-switch lock */
	spinlock_t switch_lock;
/* journalling filesystem info */
	void *journal_info;
/* VM state */
	struct reclaim_state *reclaim_state;
	struct dentry *proc_dentry;
	struct backing_dev_info *backing_dev_info;
	struct io_context *io_context;	/* See blkdev.h (forward-declared above) */
	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
};
static inline pid_t process_group(struct task_struct *tsk) |
{ |
return tsk->group_leader->__pgrp; |
} |
extern void __put_task_struct(struct task_struct *tsk); |
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) |
#define put_task_struct(tsk) \ |
do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0) |
/* |
* Per process flags |
*/ |
#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */ |
/* Not implemented yet, only for 486*/ |
#define PF_STARTING 0x00000002 /* being created */ |
#define PF_EXITING 0x00000004 /* getting shut down */ |
#define PF_DEAD 0x00000008 /* Dead */ |
#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ |
#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ |
#define PF_DUMPCORE 0x00000200 /* dumped core */ |
#define PF_SIGNALED 0x00000400 /* killed by a signal */ |
#define PF_MEMALLOC 0x00000800 /* Allocating memory */ |
#define PF_MEMDIE 0x00001000 /* Killed for out-of-memory */ |
#define PF_FLUSHER 0x00002000 /* responsible for disk writeback */ |
#define PF_FREEZE 0x00004000 /* this task should be frozen for suspend */ |
#define PF_IOTHREAD 0x00008000 /* this thread is needed for doing I/O to swap */ |
#define PF_FROZEN 0x00010000 /* frozen for system suspend */ |
#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ |
#define PF_KSWAPD 0x00040000 /* I am kswapd */ |
#define PF_SWAPOFF 0x00080000 /* I am in swapoff */ |
#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ |
#define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */ |
#ifdef CONFIG_SMP
extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
#else
/* UP: there is only one CPU, so any affinity mask is trivially
 * satisfied -- report success without touching the task. */
static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
{
	return 0;
}
#endif
extern unsigned long long sched_clock(void); |
#ifdef CONFIG_NUMA |
extern void sched_balance_exec(void); |
extern void node_nr_running_init(void); |
#else |
#define sched_balance_exec() {} |
#define node_nr_running_init() {} |
#endif |
extern void set_user_nice(task_t *p, long nice); |
extern int task_prio(task_t *p); |
extern int task_nice(task_t *p); |
extern int task_curr(task_t *p); |
extern int idle_cpu(int cpu); |
void yield(void); |
/* |
* The default (Linux) execution domain. |
*/ |
extern struct exec_domain default_exec_domain; |
#ifndef INIT_THREAD_SIZE |
# define INIT_THREAD_SIZE 2048*sizeof(long) |
#endif |
union thread_union { |
struct thread_info thread_info; |
unsigned long stack[INIT_THREAD_SIZE/sizeof(long)]; |
}; |
#ifndef __HAVE_ARCH_KSTACK_END
/*
 * True when addr sits in the lowest pointer-sized slot of the
 * THREAD_SIZE-aligned kernel stack area, i.e. the deepest usable word.
 * Rounding addr up by sizeof(void*)-1 before masking tolerates the
 * slightly misaligned stacks mentioned below.
 */
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif
extern union thread_union init_thread_union; |
extern struct task_struct init_task; |
extern struct mm_struct init_mm; |
extern struct task_struct *find_task_by_pid(int pid); |
extern void set_special_pids(pid_t session, pid_t pgrp); |
extern void __set_special_pids(pid_t session, pid_t pgrp); |
/* per-UID process charging. */ |
extern struct user_struct * alloc_uid(uid_t); |
extern void free_uid(struct user_struct *); |
extern void switch_uid(struct user_struct *); |
#include <asm/current.h> |
extern unsigned long itimer_ticks; |
extern unsigned long itimer_next; |
extern void do_timer(struct pt_regs *); |
extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state)); |
extern int FASTCALL(wake_up_process(struct task_struct * tsk)); |
#ifdef CONFIG_SMP |
extern void FASTCALL(kick_process(struct task_struct * tsk)); |
#else |
static inline void kick_process(struct task_struct *tsk) { } |
#endif |
extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk)); |
extern void FASTCALL(sched_exit(task_t * p)); |
asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru); |
extern int in_group_p(gid_t); |
extern int in_egroup_p(gid_t); |
extern void proc_caches_init(void); |
extern void flush_signals(struct task_struct *); |
extern void flush_signal_handlers(struct task_struct *, int force_default); |
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); |
/*
 * Convenience wrapper around dequeue_signal() that takes
 * tsk->sighand->siglock with interrupts disabled for the duration of
 * the call, and returns dequeue_signal()'s result unchanged.
 */
static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	ret = dequeue_signal(tsk, mask, info);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

	return ret;
}
extern void block_all_signals(int (*notifier)(void *priv), void *priv, |
sigset_t *mask); |
extern void unblock_all_signals(void); |
extern void release_task(struct task_struct * p); |
extern int send_sig_info(int, struct siginfo *, struct task_struct *); |
extern int send_group_sig_info(int, struct siginfo *, struct task_struct *); |
extern int force_sig_info(int, struct siginfo *, struct task_struct *); |
extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp); |
extern int kill_pg_info(int, struct siginfo *, pid_t); |
extern int kill_sl_info(int, struct siginfo *, pid_t); |
extern int kill_proc_info(int, struct siginfo *, pid_t); |
extern void notify_parent(struct task_struct *, int); |
extern void do_notify_parent(struct task_struct *, int); |
extern void force_sig(int, struct task_struct *); |
extern void force_sig_specific(int, struct task_struct *); |
extern int send_sig(int, struct task_struct *, int); |
extern void zap_other_threads(struct task_struct *p); |
extern int kill_pg(pid_t, int, int); |
extern int kill_sl(pid_t, int, int); |
extern int kill_proc(pid_t, int, int); |
extern struct sigqueue *sigqueue_alloc(void); |
extern void sigqueue_free(struct sigqueue *); |
extern int send_sigqueue(int, struct sigqueue *, struct task_struct *); |
extern int send_group_sigqueue(int, struct sigqueue *, struct task_struct *); |
extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *); |
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long); |
/* These can be the second arg to send_sig_info/send_group_sig_info. */ |
#define SEND_SIG_NOINFO ((struct siginfo *) 0) |
#define SEND_SIG_PRIV ((struct siginfo *) 1) |
#define SEND_SIG_FORCED ((struct siginfo *) 2) |
/* True if we are on the alternate signal stack. */ |
static inline int on_sig_stack(unsigned long sp) |
{ |
return (sp - current->sas_ss_sp < current->sas_ss_size); |
} |
static inline int sas_ss_flags(unsigned long sp) |
{ |
return (current->sas_ss_size == 0 ? SS_DISABLE |
: on_sig_stack(sp) ? SS_ONSTACK : 0); |
} |
#ifdef CONFIG_SECURITY
/* code is in security.c */
extern int capable(int cap);
#else
/*
 * Return 1 if current holds capability "cap" in its effective set, 0
 * otherwise.  On success, records that privilege was actually used by
 * setting PF_SUPERPRIV ("used super-user privileges" below).
 */
static inline int capable(int cap)
{
	if (cap_raised(current->cap_effective, cap)) {
		current->flags |= PF_SUPERPRIV;
		return 1;
	}
	return 0;
}
#endif
/* |
* Routines for handling mm_structs |
*/ |
extern struct mm_struct * mm_alloc(void); |
/* mmdrop drops the mm and the page tables */ |
extern inline void FASTCALL(__mmdrop(struct mm_struct *)); |
/*
 * Drop one mm_count reference; when the last one goes, __mmdrop()
 * frees the mm and its page tables (see the comment above).
 */
static inline void mmdrop(struct mm_struct * mm)
{
	if (atomic_dec_and_test(&mm->mm_count))
		__mmdrop(mm);
}
/* mmput gets rid of the mappings and all user-space */ |
extern void mmput(struct mm_struct *); |
/* Grab a reference to the mm if its not already going away */ |
extern struct mm_struct *mmgrab(struct mm_struct *); |
/* Remove the current tasks stale references to the old mm_struct */ |
extern void mm_release(struct task_struct *, struct mm_struct *); |
extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); |
extern void flush_thread(void); |
extern void exit_thread(void); |
extern void exit_mm(struct task_struct *); |
extern void exit_files(struct task_struct *); |
extern void exit_signal(struct task_struct *); |
extern void __exit_signal(struct task_struct *); |
extern void exit_sighand(struct task_struct *); |
extern void __exit_sighand(struct task_struct *); |
extern void exit_itimers(struct task_struct *); |
extern NORET_TYPE void do_group_exit(int); |
extern void reparent_to_init(void); |
extern void daemonize(const char *, ...); |
extern int allow_signal(int); |
extern int disallow_signal(int); |
extern task_t *child_reaper; |
extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *); |
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); |
extern struct task_struct * copy_process(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); |
#ifdef CONFIG_SMP |
extern void wait_task_inactive(task_t * p); |
#else |
#define wait_task_inactive(p) do { } while (0) |
#endif |
#define remove_parent(p) list_del_init(&(p)->sibling) |
#define add_parent(p, parent) list_add_tail(&(p)->sibling,&(parent)->children) |
#define REMOVE_LINKS(p) do { \ |
if (thread_group_leader(p)) \ |
list_del_init(&(p)->tasks); \ |
remove_parent(p); \ |
} while (0) |
#define SET_LINKS(p) do { \ |
if (thread_group_leader(p)) \ |
list_add_tail(&(p)->tasks,&init_task.tasks); \ |
add_parent(p, (p)->parent); \ |
} while (0) |
#define next_task(p) list_entry((p)->tasks.next, struct task_struct, tasks) |
#define prev_task(p) list_entry((p)->tasks.prev, struct task_struct, tasks) |
#define for_each_process(p) \ |
for (p = &init_task ; (p = next_task(p)) != &init_task ; ) |
/* |
* Careful: do_each_thread/while_each_thread is a double loop so |
* 'break' will not work as expected - use goto instead. |
*/ |
#define do_each_thread(g, t) \ |
for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do |
#define while_each_thread(g, t) \ |
while ((t = next_thread(t)) != g) |
extern task_t * FASTCALL(next_thread(task_t *p)); |
#define thread_group_leader(p) (p->pid == p->tgid) |
/*
 * True when the PIDTYPE_TGID pid chain for p's thread group holds
 * exactly one entry -- the head's next element links straight back to
 * the head -- i.e. the group has no threads besides the leader.
 */
static inline int thread_group_empty(task_t *p)
{
	struct pid *pid = p->pids[PIDTYPE_TGID].pidptr;
	return pid->task_list.next->next == &pid->task_list;
}
#define delay_group_leader(p) \ |
(thread_group_leader(p) && !thread_group_empty(p)) |
extern void unhash_process(struct task_struct *p); |
/* Protects ->fs, ->files, ->mm, and synchronises with wait4(). |
* Nests both inside and outside of read_lock(&tasklist_lock). |
* It must not be nested with write_lock_irq(&tasklist_lock), |
* neither inside nor outside. |
*/ |
/* Acquire p->alloc_lock (locking rules are in the comment above). */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}
/* Release p->alloc_lock taken by task_lock(). */
static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
/** |
* get_task_mm - acquire a reference to the task's mm |
* |
* Returns %NULL if the task has no mm. User must release |
* the mm via mmput() after use. |
*/ |
static inline struct mm_struct * get_task_mm(struct task_struct * task)
{
	struct mm_struct * mm;

	/* alloc_lock protects ->mm during (de-)allocation -- see task_lock() */
	task_lock(task);
	mm = task->mm;
	if (mm)
		/* per mmgrab()'s declaration above, this takes a reference
		 * only "if its not already going away" -- so mm may come
		 * back NULL even when task->mm was set */
		mm = mmgrab(mm);
	task_unlock(task);

	return mm;
}
/* set thread flags in other task's structures |
* - see asm/thread_info.h for TIF_xxxx flags available |
*/ |
/* Set TIF flag "flag" in tsk's thread_info. */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(tsk->thread_info,flag);
}

/* Clear TIF flag "flag" in tsk's thread_info. */
static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(tsk->thread_info,flag);
}

/* Atomically set TIF flag "flag", returning its previous value. */
static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(tsk->thread_info,flag);
}

/* Atomically clear TIF flag "flag", returning its previous value. */
static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(tsk->thread_info,flag);
}

/* Test TIF flag "flag" in tsk's thread_info. */
static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(tsk->thread_info,flag);
}

/* Request a reschedule of tsk (sets TIF_NEED_RESCHED). */
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

/* Withdraw a pending reschedule request for tsk. */
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

/* Nonzero when p has a signal pending delivery (TIF_SIGPENDING). */
static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

/* Nonzero when the current thread has been asked to reschedule. */
static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}
extern void __cond_resched(void); |
/*
 * Voluntarily yield the CPU, but only if a reschedule has actually
 * been requested (TIF_NEED_RESCHED set on the current thread).
 */
static inline void cond_resched(void)
{
	if (need_resched())
		__cond_resched();
}
/* |
* cond_resched_lock() - if a reschedule is pending, drop the given lock, |
* call schedule, and on return reacquire the lock. |
* |
* This works OK both with and without CONFIG_PREEMPT. We do strange low-level |
* operations here to prevent schedule() from being called twice (once via |
* spin_unlock(), once by hand). |
*/ |
static inline void cond_resched_lock(spinlock_t * lock)
{
	if (need_resched()) {
		/* raw unlock + no-resched preempt enable: per the comment
		 * above, this guarantees schedule() runs exactly once,
		 * inside __cond_resched(), not again from spin_unlock() */
		_raw_spin_unlock(lock);
		preempt_enable_no_resched();
		__cond_resched();
		spin_lock(lock);
	}
}
/* Reevaluate whether the task has signals pending delivery. |
This is required every time the blocked sigset_t changes. |
callers must hold sighand->siglock. */ |
extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t)); |
extern void recalc_sigpending(void); |
extern void signal_wake_up(struct task_struct *t, int resume_stopped); |
/* |
* Wrappers for p->thread_info->cpu access. No-op on UP. |
*/ |
#ifdef CONFIG_SMP

/* Read the CPU number stored in p's thread_info. */
static inline unsigned int task_cpu(struct task_struct *p)
{
	return p->thread_info->cpu;
}

/* Record cpu as p's CPU in its thread_info. */
static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	p->thread_info->cpu = cpu;
}

#else

/* UP: everything is on CPU 0; reading is constant, writing a no-op. */
static inline unsigned int task_cpu(struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */
#endif /* __KERNEL__ */ |
#endif |
#ifndef _LINUX_SCHED_H |
#define _LINUX_SCHED_H |
#include <asm/param.h> /* for HZ */ |
#include <linux/config.h> |
#include <linux/capability.h> |
#include <linux/threads.h> |
#include <linux/kernel.h> |
#include <linux/types.h> |
#include <linux/timex.h> |
#include <linux/jiffies.h> |
#include <linux/rbtree.h> |
#include <linux/thread_info.h> |
#include <linux/cpumask.h> |
#include <asm/system.h> |
#include <asm/semaphore.h> |
#include <asm/page.h> |
#include <asm/ptrace.h> |
#include <asm/mmu.h> |
#include <linux/smp.h> |
#include <linux/sem.h> |
#include <linux/signal.h> |
#include <linux/securebits.h> |
#include <linux/fs_struct.h> |
#include <linux/compiler.h> |
#include <linux/completion.h> |
#include <linux/pid.h> |
#include <linux/percpu.h> |
struct exec_domain; |
/* |
* cloning flags: |
*/ |
#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */ |
#define CLONE_VM 0x00000100 /* set if VM shared between processes */ |
#define CLONE_FS 0x00000200 /* set if fs info shared between processes */ |
#define CLONE_FILES 0x00000400 /* set if open files shared between processes */ |
#define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked signals shared */ |
#define CLONE_IDLETASK 0x00001000 /* set if new pid should be 0 (kernel only)*/ |
#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */ |
#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */ |
#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */ |
#define CLONE_THREAD 0x00010000 /* Same thread group? */ |
#define CLONE_NEWNS 0x00020000 /* New namespace group? */ |
#define CLONE_SYSVSEM 0x00040000 /* share system V SEM_UNDO semantics */ |
#define CLONE_SETTLS 0x00080000 /* create a new TLS for the child */ |
#define CLONE_PARENT_SETTID 0x00100000 /* set the TID in the parent */ |
#define CLONE_CHILD_CLEARTID 0x00200000 /* clear the TID in the child */ |
#define CLONE_DETACHED 0x00400000 /* Not used - CLONE_THREAD implies detached uniquely */ |
#define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */ |
#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */ |
#define CLONE_STOPPED 0x02000000 /* Start in stopped state */ |
/* |
* List of flags we want to share for kernel threads, |
* if only because they are not used by them anyway. |
*/ |
#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND) |
/* |
 * These are the constants used to fake the fixed-point load-average
* counting. Some notes: |
* - 11 bit fractions expand to 22 bits by the multiplies: this gives |
* a load-average precision of 10 bits integer + 11 bits fractional |
* - if you want to count load-averages more often, you need more |
* precision, or rounding will get you. With 2-second counting freq, |
* the EXP_n values would be 1981, 2034 and 2043 if still using only |
* 11 bit fractions. |
*/ |
extern unsigned long avenrun[]; /* Load averages */ |
#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ)		/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */
/*
 * Decay "load" toward sample "n" in FSHIFT-bit fixed point:
 *	load = load*exp + n*(FIXED_1 - exp), renormalized by >> FSHIFT.
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and stays safe in an unbraced if/else; every argument is
 * parenthesized so expression arguments keep their intended precedence.
 * "load" is an lvalue and is updated in place.
 */
#define CALC_LOAD(load,exp,n) \
	do { \
		(load) *= (exp); \
		(load) += (n)*(FIXED_1-(exp)); \
		(load) >>= FSHIFT; \
	} while (0)
#define CT_TO_SECS(x) ((x) / HZ) |
#define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ) |
extern int nr_threads; |
extern int last_pid; |
DECLARE_PER_CPU(unsigned long, process_counts); |
extern int nr_processes(void); |
extern unsigned long nr_running(void); |
extern unsigned long nr_uninterruptible(void); |
extern unsigned long nr_iowait(void); |
#include <linux/time.h> |
#include <linux/param.h> |
#include <linux/resource.h> |
#include <linux/timer.h> |
#include <asm/processor.h> |
#define TASK_RUNNING 0 |
#define TASK_INTERRUPTIBLE 1 |
#define TASK_UNINTERRUPTIBLE 2 |
#define TASK_STOPPED 4 |
#define TASK_ZOMBIE 8 |
#define TASK_DEAD 16 |
#define __set_task_state(tsk, state_value) \ |
do { (tsk)->state = (state_value); } while (0) |
#define set_task_state(tsk, state_value) \ |
set_mb((tsk)->state, (state_value)) |
#define __set_current_state(state_value) \ |
do { current->state = (state_value); } while (0) |
#define set_current_state(state_value) \ |
set_mb(current->state, (state_value)) |
/* |
* Scheduling policies |
*/ |
#define SCHED_NORMAL 0 |
#define SCHED_FIFO 1 |
#define SCHED_RR 2 |
struct sched_param { |
int sched_priority; |
}; |
#ifdef __KERNEL__ |
#include <linux/spinlock.h> |
/* |
* This serializes "schedule()" and also protects |
* the run-queue from deletions/modifications (but |
* _adding_ to the beginning of the run-queue has |
* a separate lock). |
*/ |
extern rwlock_t tasklist_lock; |
extern spinlock_t mmlist_lock; |
typedef struct task_struct task_t; |
extern void sched_init(void); |
extern void init_idle(task_t *idle, int cpu); |
extern void show_state(void); |
extern void show_regs(struct pt_regs *); |
/* |
* TASK is a pointer to the task whose backtrace we want to see (or NULL for current |
* task), SP is the stack pointer of the first frame that should be shown in the back |
* trace (or NULL if the entire call-chain of the task should be shown). |
*/ |
extern void show_stack(struct task_struct *task, unsigned long *sp); |
void io_schedule(void); |
long io_schedule_timeout(long timeout); |
extern void cpu_init (void); |
extern void trap_init(void); |
extern void update_process_times(int user); |
extern void update_one_process(struct task_struct *p, unsigned long user, |
unsigned long system, int cpu); |
extern void scheduler_tick(int user_tick, int system); |
extern unsigned long cache_decay_ticks; |
#define MAX_SCHEDULE_TIMEOUT LONG_MAX |
extern signed long FASTCALL(schedule_timeout(signed long timeout)); |
asmlinkage void schedule(void); |
struct namespace; |
/* Maximum number of active map areas.. This is a random (large) number */ |
#define MAX_MAP_COUNT (65536) |
#include <linux/aio.h> |
/*
 * Memory descriptor: one per user address space.  mm_users counts tasks
 * with user space mapped; mm_count counts all references to the struct
 * itself, with the whole mm_users group collectively holding one.
 */
struct mm_struct {
	struct vm_area_struct * mmap;		/* list of VMAs */
	struct rb_root mm_rb;			/* rb-tree of the same VMAs, for lookup */
	struct vm_area_struct * mmap_cache;	/* last find_vma result */
	unsigned long free_area_cache;		/* first hole */
	pgd_t * pgd;				/* top-level page table */
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	int map_count;				/* number of VMAs */
	struct rw_semaphore mmap_sem;
	spinlock_t page_table_lock;		/* Protects task page tables and mm->rss */
	struct list_head mmlist;		/* List of all active mm's.  These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */
	/* bounds of the executable image, heap, stack, argv and envp */
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;
	unsigned long rss, total_vm, locked_vm;	/* page counts: resident, total, mlocked */
	unsigned long def_flags;
	cpumask_t cpu_vm_mask;
	unsigned long swap_address;
	unsigned long saved_auxv[40];		/* for /proc/PID/auxv */
	unsigned dumpable:1;			/* core dump permitted flag */
#ifdef CONFIG_HUGETLB_PAGE
	int used_hugetlb;
#endif
	/* Architecture-specific MM context */
	mm_context_t context;
	/* coredumping support */
	int core_waiters;
	struct completion *core_startup_done, core_done;
	/* aio bits */
	rwlock_t ioctx_list_lock;
	struct kioctx *ioctx_list;
	struct kioctx default_kioctx;
};
extern int mmlist_nr; |
/*
 * Table of signal dispositions, shared by reference between tasks
 * (see count).  siglock serializes access to the table (callers of the
 * signal-delivery helpers below are expected to hold it).
 */
struct sighand_struct {
	atomic_t count;				/* number of tasks sharing this table */
	struct k_sigaction action[_NSIG];	/* one disposition per signal */
	spinlock_t siglock;
};
/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t count;				/* number of tasks sharing this struct */
	/* current thread group signal load-balancing target: */
	task_t *curr_target;
	/* shared signal handling: */
	struct sigpending shared_pending;
	/* thread group exit support */
	int group_exit;				/* nonzero while the whole group is exiting */
	int group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	struct task_struct *group_exit_task;
	int notify_count;
	/* thread group stop support, overloads group_exit_code too */
	int group_stop_count;
};
/* |
* Priority of a process goes from 0..MAX_PRIO-1, valid RT |
* priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL tasks are |
* in the range MAX_RT_PRIO..MAX_PRIO-1. Priority values |
* are inverted: lower p->prio value means higher priority. |
* |
 * The MAX_USER_RT_PRIO value allows the actual maximum
* RT priority to be separate from the value exported to |
* user-space. This allows kernel threads to set their |
* priority to a value higher than any user task. Note: |
* MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. |
*/ |
#define MAX_USER_RT_PRIO 100 |
#define MAX_RT_PRIO MAX_USER_RT_PRIO |
#define MAX_PRIO (MAX_RT_PRIO + 40) |
#define rt_task(p) ((p)->prio < MAX_RT_PRIO) |
/* |
* Some day this will be a full-fledged user tracking system.. |
*/ |
/*
 * Per-UID accounting record: reference-counted, looked up through a
 * uid hash table (see uidhash_list and find_user/alloc_uid/free_uid).
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	/* Hash table maintenance information */
	struct list_head uidhash_list;
	uid_t uid;		/* the uid this record accounts for */
};
extern struct user_struct *find_user(uid_t); |
extern struct user_struct root_user; |
#define INIT_USER (&root_user) |
typedef struct prio_array prio_array_t; |
struct backing_dev_info; |
struct reclaim_state; |
/*
 * POSIX.1b interval timer structure.
 * One per timer created via the POSIX timer API; delivery goes to
 * it_process using the sigevent fields captured below.
 */
struct k_itimer {
	struct list_head list;		 /* free/ allocate list */
	spinlock_t it_lock;		 /* per-timer lock */
	clockid_t it_clock;		 /* which timer type */
	timer_t it_id;			 /* timer id */
	int it_overrun;			 /* overrun on pending signal  */
	int it_overrun_last;		 /* overrun on last delivered signal */
	int it_requeue_pending;          /* waiting to requeue this timer */
	int it_sigev_notify;		 /* notify word of sigevent struct */
	int it_sigev_signo;		 /* signo word of sigevent struct */
	sigval_t it_sigev_value;	 /* value word of sigevent struct */
	unsigned long it_incr;		/* interval specified in jiffies */
	struct task_struct *it_process;	/* process to send signal to */
	struct timer_list it_timer;	/* the underlying kernel timer */
	struct sigqueue *sigq;		/* signal queue entry. */
};
struct io_context; /* See blkdev.h */ |
void exit_io_context(void); |
/*
 * Process/thread descriptor: one per task in the system.  Major
 * resources (->mm, ->files, ->fs, ->signal, ->sighand, ->namespace)
 * are held by reference and may be shared between tasks; alloc_lock
 * below protects their (de-)allocation.
 */
struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	struct thread_info *thread_info;	/* low-level flags/cpu; see *_tsk_thread_flag() below */
	atomic_t usage;		/* reference count, see get/put_task_struct() */
	unsigned long flags;	/* per process flags, defined below */
	unsigned long ptrace;
	int lock_depth;		/* Lock depth */
	/* scheduler state */
	int prio, static_prio;	/* effective and nice-derived priority (lower = higher prio) */
	struct list_head run_list;
	prio_array_t *array;	/* runqueue priority array this task is queued on */
	unsigned long sleep_avg;
	long interactive_credit;
	unsigned long long timestamp;
	int activated;
	unsigned long policy;	/* scheduling policy (SCHED_*) */
	cpumask_t cpus_allowed;	/* CPU affinity mask, see set_cpus_allowed() */
	unsigned int time_slice, first_time_slice;
	struct list_head tasks;	/* linkage in the global task list (see next_task) */
	struct list_head ptrace_children;
	struct list_head ptrace_list;
	struct mm_struct *mm, *active_mm;	/* user mm (NULL for kernel threads) / mm in use */
	/* task state */
	struct linux_binfmt *binfmt;
	int exit_code, exit_signal;
	int pdeath_signal;  /*  The signal sent when the parent dies  */
	/* ??? */
	unsigned long personality;
	int did_exec:1;
	pid_t pid;
	pid_t __pgrp; /* Accessed via process_group() */
	pid_t tty_old_pgrp;
	pid_t session;
	pid_t tgid;		/* thread group id (== pid of the group leader) */
	/* boolean value for session group leader */
	int leader;
	/* 
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with 
	 * p->parent->pid)
	 */
	struct task_struct *real_parent; /* real parent process (when being debugged) */
	struct task_struct *parent;	/* parent process */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */
	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	wait_queue_head_t wait_chldexit;	/* for wait4() */
	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
	unsigned long rt_priority;
	/* classic interval timers (ITIMER_REAL/PROF/VIRTUAL), in ticks */
	unsigned long it_real_value, it_prof_value, it_virt_value;
	unsigned long it_real_incr, it_prof_incr, it_virt_incr;
	struct timer_list real_timer;
	struct list_head posix_timers; /* POSIX.1b Interval Timers */
	unsigned long utime, stime, cutime, cstime;	/* own and children CPU times */
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; /* context switch counts */
	u64 start_time;
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
/* process credentials */
	uid_t uid,euid,suid,fsuid;
	gid_t gid,egid,sgid,fsgid;
	int ngroups;
	gid_t	groups[NGROUPS];
	kernel_cap_t   cap_effective, cap_inheritable, cap_permitted;
	int keep_capabilities:1;
	struct user_struct *user;	/* per-UID accounting, see user_struct above */
/* limits */
	struct rlimit rlim[RLIM_NLIMITS];
	unsigned short used_math;
	char comm[16];			/* executable name (no path) */
/* file system info */
	int link_count, total_link_count;
	struct tty_struct *tty; /* NULL if no tty */
/* ipc stuff */
	struct sysv_sem sysvsem;
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespace */
	struct namespace *namespace;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;
	sigset_t blocked, real_blocked;
	struct sigpending pending;
	/* alternate signal stack, see on_sig_stack()/sas_ss_flags() below */
	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	/* signal-delivery interception hook, see block_all_signals() */
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;
	void *security;
	
/* Thread group tracking */
   	u32 parent_exec_id;
   	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty */
	spinlock_t alloc_lock;
/* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
	spinlock_t proc_lock;
/* context-switch lock */
	spinlock_t switch_lock;
/* journalling filesystem info */
	void *journal_info;
/* VM state */
	struct reclaim_state *reclaim_state;
	struct dentry *proc_dentry;
	struct backing_dev_info *backing_dev_info;
	struct io_context *io_context;
	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
};
static inline pid_t process_group(struct task_struct *tsk) |
{ |
return tsk->group_leader->__pgrp; |
} |
extern void __put_task_struct(struct task_struct *tsk); |
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) |
#define put_task_struct(tsk) \ |
do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0) |
/* |
* Per process flags |
*/ |
#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */ |
/* Not implemented yet, only for 486*/ |
#define PF_STARTING 0x00000002 /* being created */ |
#define PF_EXITING 0x00000004 /* getting shut down */ |
#define PF_DEAD 0x00000008 /* Dead */ |
#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ |
#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ |
#define PF_DUMPCORE 0x00000200 /* dumped core */ |
#define PF_SIGNALED 0x00000400 /* killed by a signal */ |
#define PF_MEMALLOC 0x00000800 /* Allocating memory */ |
#define PF_MEMDIE 0x00001000 /* Killed for out-of-memory */ |
#define PF_FLUSHER 0x00002000 /* responsible for disk writeback */ |
#define PF_FREEZE 0x00004000 /* this task should be frozen for suspend */ |
#define PF_IOTHREAD 0x00008000 /* this thread is needed for doing I/O to swap */ |
#define PF_FROZEN 0x00010000 /* frozen for system suspend */ |
#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ |
#define PF_KSWAPD 0x00040000 /* I am kswapd */ |
#define PF_SWAPOFF 0x00080000 /* I am in swapoff */ |
#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ |
#define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */ |
#ifdef CONFIG_SMP |
extern int set_cpus_allowed(task_t *p, cpumask_t new_mask); |
#else |
/* UP stub: with a single CPU any affinity request trivially succeeds. */
static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
{
	return 0;
}
#endif |
extern unsigned long long sched_clock(void); |
#ifdef CONFIG_NUMA |
extern void sched_balance_exec(void); |
extern void node_nr_running_init(void); |
#else |
#define sched_balance_exec() {} |
#define node_nr_running_init() {} |
#endif |
extern void set_user_nice(task_t *p, long nice); |
extern int task_prio(task_t *p); |
extern int task_nice(task_t *p); |
extern int task_curr(task_t *p); |
extern int idle_cpu(int cpu); |
void yield(void); |
/* |
* The default (Linux) execution domain. |
*/ |
extern struct exec_domain default_exec_domain; |
#ifndef INIT_THREAD_SIZE |
# define INIT_THREAD_SIZE 2048*sizeof(long) |
#endif |
union thread_union { |
struct thread_info thread_info; |
unsigned long stack[INIT_THREAD_SIZE/sizeof(long)]; |
}; |
#ifndef __HAVE_ARCH_KSTACK_END |
static inline int kstack_end(void *addr) |
{ |
/* Reliable end of stack detection: |
* Some APM bios versions misalign the stack |
*/ |
return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); |
} |
#endif |
extern union thread_union init_thread_union; |
extern struct task_struct init_task; |
extern struct mm_struct init_mm; |
extern struct task_struct *find_task_by_pid(int pid); |
extern void set_special_pids(pid_t session, pid_t pgrp); |
extern void __set_special_pids(pid_t session, pid_t pgrp); |
/* per-UID process charging. */ |
extern struct user_struct * alloc_uid(uid_t); |
extern void free_uid(struct user_struct *); |
extern void switch_uid(struct user_struct *); |
#include <asm/current.h> |
extern unsigned long itimer_ticks; |
extern unsigned long itimer_next; |
extern void do_timer(struct pt_regs *); |
extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state)); |
extern int FASTCALL(wake_up_process(struct task_struct * tsk)); |
#ifdef CONFIG_SMP |
extern void FASTCALL(kick_process(struct task_struct * tsk)); |
#else |
static inline void kick_process(struct task_struct *tsk) { } |
#endif |
extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk)); |
extern void FASTCALL(sched_exit(task_t * p)); |
asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru); |
extern int in_group_p(gid_t); |
extern int in_egroup_p(gid_t); |
extern void proc_caches_init(void); |
extern void flush_signals(struct task_struct *); |
extern void flush_signal_handlers(struct task_struct *, int force_default); |
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); |
/*
 * dequeue_signal() for callers that do not already hold the siglock:
 * takes tsk->sighand->siglock irq-safely around the dequeue and
 * returns whatever dequeue_signal() returned.
 */
static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	ret = dequeue_signal(tsk, mask, info);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

	return ret;
}	
extern void block_all_signals(int (*notifier)(void *priv), void *priv, |
sigset_t *mask); |
extern void unblock_all_signals(void); |
extern void release_task(struct task_struct * p); |
extern int send_sig_info(int, struct siginfo *, struct task_struct *); |
extern int send_group_sig_info(int, struct siginfo *, struct task_struct *); |
extern int force_sig_info(int, struct siginfo *, struct task_struct *); |
extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp); |
extern int kill_pg_info(int, struct siginfo *, pid_t); |
extern int kill_sl_info(int, struct siginfo *, pid_t); |
extern int kill_proc_info(int, struct siginfo *, pid_t); |
extern void notify_parent(struct task_struct *, int); |
extern void do_notify_parent(struct task_struct *, int); |
extern void force_sig(int, struct task_struct *); |
extern void force_sig_specific(int, struct task_struct *); |
extern int send_sig(int, struct task_struct *, int); |
extern void zap_other_threads(struct task_struct *p); |
extern int kill_pg(pid_t, int, int); |
extern int kill_sl(pid_t, int, int); |
extern int kill_proc(pid_t, int, int); |
extern struct sigqueue *sigqueue_alloc(void); |
extern void sigqueue_free(struct sigqueue *); |
extern int send_sigqueue(int, struct sigqueue *, struct task_struct *); |
extern int send_group_sigqueue(int, struct sigqueue *, struct task_struct *); |
extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *); |
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long); |
/* These can be the second arg to send_sig_info/send_group_sig_info. */ |
#define SEND_SIG_NOINFO ((struct siginfo *) 0) |
#define SEND_SIG_PRIV ((struct siginfo *) 1) |
#define SEND_SIG_FORCED ((struct siginfo *) 2) |
/* True if we are on the alternate signal stack. */ |
static inline int on_sig_stack(unsigned long sp) |
{ |
return (sp - current->sas_ss_sp < current->sas_ss_size); |
} |
static inline int sas_ss_flags(unsigned long sp) |
{ |
return (current->sas_ss_size == 0 ? SS_DISABLE |
: on_sig_stack(sp) ? SS_ONSTACK : 0); |
} |
#ifdef CONFIG_SECURITY |
/* code is in security.c */ |
extern int capable(int cap); |
#else |
static inline int capable(int cap) |
{ |
if (cap_raised(current->cap_effective, cap)) { |
current->flags |= PF_SUPERPRIV; |
return 1; |
} |
return 0; |
} |
#endif |
/* |
* Routines for handling mm_structs |
*/ |
extern struct mm_struct * mm_alloc(void); |
/* mmdrop drops the mm and the page tables */ |
extern inline void FASTCALL(__mmdrop(struct mm_struct *)); |
/*
 * Drop one mm_count reference; the last dropper frees the mm and its
 * page tables via __mmdrop().
 */
static inline void mmdrop(struct mm_struct * mm)
{
	if (atomic_dec_and_test(&mm->mm_count))
		__mmdrop(mm);
}
/* mmput gets rid of the mappings and all user-space */ |
extern void mmput(struct mm_struct *); |
/* Grab a reference to the mm if its not already going away */ |
extern struct mm_struct *mmgrab(struct mm_struct *); |
/* Remove the current tasks stale references to the old mm_struct */ |
extern void mm_release(struct task_struct *, struct mm_struct *); |
extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); |
extern void flush_thread(void); |
extern void exit_thread(void); |
extern void exit_mm(struct task_struct *); |
extern void exit_files(struct task_struct *); |
extern void exit_signal(struct task_struct *); |
extern void __exit_signal(struct task_struct *); |
extern void exit_sighand(struct task_struct *); |
extern void __exit_sighand(struct task_struct *); |
extern void exit_itimers(struct task_struct *); |
extern NORET_TYPE void do_group_exit(int); |
extern void reparent_to_init(void); |
extern void daemonize(const char *, ...); |
extern int allow_signal(int); |
extern int disallow_signal(int); |
extern task_t *child_reaper; |
extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *); |
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); |
extern struct task_struct * copy_process(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); |
#ifdef CONFIG_SMP |
extern void wait_task_inactive(task_t * p); |
#else |
#define wait_task_inactive(p) do { } while (0) |
#endif |
#define remove_parent(p) list_del_init(&(p)->sibling) |
#define add_parent(p, parent) list_add_tail(&(p)->sibling,&(parent)->children) |
#define REMOVE_LINKS(p) do { \ |
if (thread_group_leader(p)) \ |
list_del_init(&(p)->tasks); \ |
remove_parent(p); \ |
} while (0) |
#define SET_LINKS(p) do { \ |
if (thread_group_leader(p)) \ |
list_add_tail(&(p)->tasks,&init_task.tasks); \ |
add_parent(p, (p)->parent); \ |
} while (0) |
#define next_task(p) list_entry((p)->tasks.next, struct task_struct, tasks) |
#define prev_task(p) list_entry((p)->tasks.prev, struct task_struct, tasks) |
#define for_each_process(p) \ |
for (p = &init_task ; (p = next_task(p)) != &init_task ; ) |
/* |
* Careful: do_each_thread/while_each_thread is a double loop so |
* 'break' will not work as expected - use goto instead. |
*/ |
#define do_each_thread(g, t) \ |
for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do |
#define while_each_thread(g, t) \ |
while ((t = next_thread(t)) != g) |
extern task_t * FASTCALL(next_thread(task_t *p)); |
#define thread_group_leader(p) (p->pid == p->tgid) |
/*
 * True when p is the only member of its thread group: the PIDTYPE_TGID
 * pid list then contains exactly one entry, i.e. the first element's
 * next pointer is already back at the list head.
 */
static inline int thread_group_empty(task_t *p)
{
	struct pid *pid = p->pids[PIDTYPE_TGID].pidptr;

	return pid->task_list.next->next == &pid->task_list;
}
#define delay_group_leader(p) \ |
(thread_group_leader(p) && !thread_group_empty(p)) |
extern void unhash_process(struct task_struct *p); |
/* Protects ->fs, ->files, ->mm, and synchronises with wait4().
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}
/* Release the per-task alloc_lock taken by task_lock() above. */
static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm.  User must release
 * the mm via mmput() after use.
 *
 * task_lock() pins task->mm while we take the reference; per mmgrab()'s
 * contract above, the result may also be %NULL if the mm is already
 * on its way out.
 */
static inline struct mm_struct * get_task_mm(struct task_struct * task)
{
	struct mm_struct * mm;

	task_lock(task);
	mm = task->mm;
	if (mm)
		mm = mmgrab(mm);
	task_unlock(task);

	return mm;
}
/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	/* delegate to the thread_info-level accessor */
	set_ti_thread_flag(tsk->thread_info,flag);
}
/* Clear TIF_xxxx @flag in @tsk's thread_info. */
static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(tsk->thread_info,flag);
}
/* Set TIF_xxxx @flag in @tsk's thread_info, returning its old value. */
static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(tsk->thread_info,flag);
}
/* Clear TIF_xxxx @flag in @tsk's thread_info, returning its old value. */
static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(tsk->thread_info,flag);
}
/* Test TIF_xxxx @flag in @tsk's thread_info. */
static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(tsk->thread_info,flag);
}
/* Mark @tsk as needing a reschedule (sets TIF_NEED_RESCHED). */
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
/* Clear @tsk's pending-reschedule mark (TIF_NEED_RESCHED). */
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
/*
 * In stock Linux this tests TIF_SIGPENDING; in this SHARK port it is
 * stubbed to always report "no signal pending" (original test kept
 * below, commented out).  NOTE(review): presumably intentional for the
 * port — confirm before re-enabling.
 */
static inline int signal_pending(struct task_struct *p)
{
	return 0; //**unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}
/* True if the current thread has TIF_NEED_RESCHED set. */
static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}
extern void __cond_resched(void); |
/* Voluntarily yield the CPU, but only if a reschedule is pending. */
static inline void cond_resched(void)
{
	if (!need_resched())
		return;
	__cond_resched();
}
/*
 * cond_resched_lock() - if a reschedule is pending, drop the given lock,
 * call schedule, and on return reacquire the lock.
 *
 * This works OK both with and without CONFIG_PREEMPT.  We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 */
static inline void cond_resched_lock(spinlock_t * lock)
{
	if (need_resched()) {
		/* raw unlock + no_resched enable: release the lock without
		 * triggering the preemption-point schedule... */
		_raw_spin_unlock(lock);
		preempt_enable_no_resched();
		/* ...so that this is the single, explicit schedule point */
		__cond_resched();
		spin_lock(lock);
	}
}
/* Reevaluate whether the task has signals pending delivery. |
This is required every time the blocked sigset_t changes. |
callers must hold sighand->siglock. */ |
extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t)); |
extern void recalc_sigpending(void); |
extern void signal_wake_up(struct task_struct *t, int resume_stopped); |
/* |
* Wrappers for p->thread_info->cpu access. No-op on UP. |
*/ |
#ifdef CONFIG_SMP |
/* SMP: return the CPU number recorded in the task's thread_info. */
static inline unsigned int task_cpu(struct task_struct *p)
{
	return p->thread_info->cpu;
}
/* SMP: record @cpu as the task's CPU in its thread_info. */
static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	p->thread_info->cpu = cpu;
}
#else |
/* UP: the only CPU is 0. */
static inline unsigned int task_cpu(struct task_struct *p)
{
	return 0;
}
/* UP: nothing to record. */
static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}
#endif /* CONFIG_SMP */ |
#endif /* __KERNEL__ */ |
#endif |