/shark/trunk/drivers/usb/host/ohci-mem.c |
---|
1,147 → 1,147 |
/* |
* OHCI HCD (Host Controller Driver) for USB. |
* |
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at> |
* (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net> |
* |
* This file is licenced under the GPL. |
*/ |
/*-------------------------------------------------------------------------*/ |
/* |
* There's basically three types of memory: |
* - data used only by the HCD ... kmalloc is fine |
* - async and periodic schedules, shared by HC and HCD ... these |
* need to use pci_pool or pci_alloc_consistent |
* - driver buffers, read/written by HC ... the hcd glue or the |
* device driver provides us with dma addresses |
* |
* There's also PCI "register" data, which is memory mapped. |
 * No memory seen by this driver is pageable. |
*/ |
/*-------------------------------------------------------------------------*/ |
static struct usb_hcd *ohci_hcd_alloc (void) |
{ |
struct ohci_hcd *ohci; |
ohci = (struct ohci_hcd *) kmalloc (sizeof *ohci, GFP_KERNEL); |
if (ohci != 0) { |
memset (ohci, 0, sizeof (struct ohci_hcd)); |
ohci->hcd.product_desc = "OHCI Host Controller"; |
return &ohci->hcd; |
} |
return 0; |
} |
/* Release state obtained from ohci_hcd_alloc(). */
static void ohci_hcd_free (struct usb_hcd *hcd)
{
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);

	kfree (ohci);
}
/*-------------------------------------------------------------------------*/ |
static int ohci_mem_init (struct ohci_hcd *ohci) |
{ |
ohci->td_cache = pci_pool_create ("ohci_td", ohci->hcd.pdev, |
sizeof (struct td), |
32 /* byte alignment */, |
0 /* no page-crossing issues */); |
if (!ohci->td_cache) |
return -ENOMEM; |
ohci->ed_cache = pci_pool_create ("ohci_ed", ohci->hcd.pdev, |
sizeof (struct ed), |
16 /* byte alignment */, |
0 /* no page-crossing issues */); |
if (!ohci->ed_cache) { |
pci_pool_destroy (ohci->td_cache); |
return -ENOMEM; |
} |
return 0; |
} |
static void ohci_mem_cleanup (struct ohci_hcd *ohci) |
{ |
if (ohci->td_cache) { |
pci_pool_destroy (ohci->td_cache); |
ohci->td_cache = 0; |
} |
if (ohci->ed_cache) { |
pci_pool_destroy (ohci->ed_cache); |
ohci->ed_cache = 0; |
} |
} |
/*-------------------------------------------------------------------------*/ |
/* ohci "done list" processing needs this mapping */ |
static inline struct td * |
dma_to_td (struct ohci_hcd *hc, dma_addr_t td_dma) |
{ |
struct td *td; |
td_dma &= TD_MASK; |
td = hc->td_hash [TD_HASH_FUNC(td_dma)]; |
while (td && td->td_dma != td_dma) |
td = td->td_hash; |
return td; |
} |
/* TDs ... */ |
static struct td * |
td_alloc (struct ohci_hcd *hc, int mem_flags) |
{ |
dma_addr_t dma; |
struct td *td; |
td = pci_pool_alloc (hc->td_cache, mem_flags, &dma); |
if (td) { |
/* in case hc fetches it, make it look dead */ |
memset (td, 0, sizeof *td); |
td->hwNextTD = cpu_to_le32 (dma); |
td->td_dma = dma; |
/* hashed in td_fill */ |
} |
return td; |
} |
static void |
td_free (struct ohci_hcd *hc, struct td *td) |
{ |
struct td **prev = &hc->td_hash [TD_HASH_FUNC (td->td_dma)]; |
while (*prev && *prev != td) |
prev = &(*prev)->td_hash; |
if (*prev) |
*prev = td->td_hash; |
else if ((td->hwINFO & TD_DONE) != 0) |
ohci_dbg (hc, "no hash for td %p\n", td); |
pci_pool_free (hc->td_cache, td, td->td_dma); |
} |
/*-------------------------------------------------------------------------*/ |
/* EDs ... */ |
static struct ed * |
ed_alloc (struct ohci_hcd *hc, int mem_flags) |
{ |
dma_addr_t dma; |
struct ed *ed; |
ed = pci_pool_alloc (hc->ed_cache, mem_flags, &dma); |
if (ed) { |
memset (ed, 0, sizeof (*ed)); |
INIT_LIST_HEAD (&ed->td_list); |
ed->dma = dma; |
} |
return ed; |
} |
static void |
ed_free (struct ohci_hcd *hc, struct ed *ed) |
{ |
pci_pool_free (hc->ed_cache, ed, ed->dma); |
} |
/* |
* OHCI HCD (Host Controller Driver) for USB. |
* |
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at> |
* (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net> |
* |
* This file is licenced under the GPL. |
*/ |
/*-------------------------------------------------------------------------*/ |
/* |
* There's basically three types of memory: |
* - data used only by the HCD ... kmalloc is fine |
* - async and periodic schedules, shared by HC and HCD ... these |
* need to use pci_pool or pci_alloc_consistent_usb |
* - driver buffers, read/written by HC ... the hcd glue or the |
* device driver provides us with dma addresses |
* |
* There's also PCI "register" data, which is memory mapped. |
 * No memory seen by this driver is pageable. |
*/ |
/*-------------------------------------------------------------------------*/ |
/* allocate the combined usb_hcd/ohci_hcd state; returns the embedded
 * usb_hcd, or NULL when out of memory (released via ohci_hcd_free)
 */
static struct usb_hcd *ohci_hcd_alloc (void)
{
	struct ohci_hcd *ohci;

	ohci = (struct ohci_hcd *) kmalloc (sizeof *ohci, GFP_KERNEL);
	if (ohci != 0) {
		memset (ohci, 0, sizeof (struct ohci_hcd));
		ohci->hcd.product_desc = "OHCI Host Controller";
		return &ohci->hcd;
	}
	return 0;
}
/* release state allocated by ohci_hcd_alloc() */
static void ohci_hcd_free (struct usb_hcd *hcd)
{
	kfree (hcd_to_ohci (hcd));
}
/*-------------------------------------------------------------------------*/ |
/* create the two pci_pools shared with the controller (32-byte aligned
 * TDs, 16-byte aligned EDs); returns 0 or -ENOMEM, undoing the TD pool
 * when the ED pool fails
 */
static int ohci_mem_init (struct ohci_hcd *ohci)
{
	ohci->td_cache = pci_pool_create ("ohci_td", ohci->hcd.pdev,
		sizeof (struct td),
		32 /* byte alignment */,
		0 /* no page-crossing issues */);
	if (!ohci->td_cache)
		return -ENOMEM;
	ohci->ed_cache = pci_pool_create ("ohci_ed", ohci->hcd.pdev,
		sizeof (struct ed),
		16 /* byte alignment */,
		0 /* no page-crossing issues */);
	if (!ohci->ed_cache) {
		pci_pool_destroy (ohci->td_cache);
		return -ENOMEM;
	}
	return 0;
}
/* destroy both descriptor pools; each pointer is cleared so a second
 * call is harmless
 */
static void ohci_mem_cleanup (struct ohci_hcd *ohci)
{
	if (ohci->td_cache) {
		pci_pool_destroy (ohci->td_cache);
		ohci->td_cache = 0;
	}
	if (ohci->ed_cache) {
		pci_pool_destroy (ohci->ed_cache);
		ohci->ed_cache = 0;
	}
}
/*-------------------------------------------------------------------------*/ |
/* ohci "done list" processing needs this mapping */ |
/* map a TD's dma address back to the driver's struct td; returns NULL
 * when the TD was never hashed (hashing happens in td_fill)
 */
static inline struct td *
dma_to_td (struct ohci_hcd *hc, dma_addr_t td_dma)
{
	struct td *td;

	td_dma &= TD_MASK;
	/* walk this bucket's singly linked chain */
	td = hc->td_hash [TD_HASH_FUNC(td_dma)];
	while (td && td->td_dma != td_dma)
		td = td->td_hash;
	return td;
}
/* TDs ... */ |
/* allocate one TD from the coherent pool (this build routes through
 * the pci_pool_alloc_usb wrapper); NULL when the pool is exhausted
 */
static struct td *
td_alloc (struct ohci_hcd *hc, int mem_flags)
{
	dma_addr_t dma;
	struct td *td;

	td = pci_pool_alloc_usb (hc->td_cache, mem_flags, &dma);
	if (td) {
		/* in case hc fetches it, make it look dead */
		memset (td, 0, sizeof *td);
		td->hwNextTD = cpu_to_le32 (dma);	/* points at itself */
		td->td_dma = dma;
		/* hashed in td_fill */
	}
	return td;
}
/* unhash (if td_fill hashed it) and free one TD; a completed TD that
 * is missing from the hash only rates a debug message
 */
static void
td_free (struct ohci_hcd *hc, struct td *td)
{
	struct td **prev = &hc->td_hash [TD_HASH_FUNC (td->td_dma)];

	while (*prev && *prev != td)
		prev = &(*prev)->td_hash;
	if (*prev)
		*prev = td->td_hash;
	else if ((td->hwINFO & TD_DONE) != 0)
		ohci_dbg (hc, "no hash for td %p\n", td);
	pci_pool_free (hc->td_cache, td, td->td_dma);
}
/*-------------------------------------------------------------------------*/ |
/* EDs ... */ |
/* allocate one ED (zeroed, idle, empty td_list) from its pool */
static struct ed *
ed_alloc (struct ohci_hcd *hc, int mem_flags)
{
	dma_addr_t dma;
	struct ed *ed;

	ed = pci_pool_alloc_usb (hc->ed_cache, mem_flags, &dma);
	if (ed) {
		memset (ed, 0, sizeof (*ed));
		INIT_LIST_HEAD (&ed->td_list);
		ed->dma = dma;
	}
	return ed;
}
/* return an ED to its pool, keyed by its dma address */
static void
ed_free (struct ohci_hcd *hc, struct ed *ed)
{
	pci_pool_free (hc->ed_cache, ed, ed->dma);
}
/shark/trunk/drivers/usb/host/ehci-hcd.c |
---|
121,8 → 121,10 |
/* Initial IRQ latency: lower than default */ |
static int log2_irq_thresh = 0; // 0 to 6 |
module_param (log2_irq_thresh, int, S_IRUGO); |
/*module_param (log2_irq_thresh, int, S_IRUGO); |
MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes"); |
*/ |
#define INTR_MASK (STS_IAA | STS_FATAL | STS_ERR | STS_INT) |
483,7 → 485,7 |
* involved with the root hub. |
*/ |
ehci->reboot_notifier.notifier_call = ehci_reboot; |
register_reboot_notifier (&ehci->reboot_notifier); |
// register_reboot_notifier (&ehci->reboot_notifier); |
ehci->hcd.state = USB_STATE_RUNNING; |
writel (FLAG_CF, &ehci->regs->configured_flag); |
540,7 → 542,7 |
/* let companion controllers work when we aren't */ |
writel (0, &ehci->regs->configured_flag); |
unregister_reboot_notifier (&ehci->reboot_notifier); |
// unregister_reboot_notifier (&ehci->reboot_notifier); |
remove_debug_files (ehci); |
1011,7 → 1013,7 |
MODULE_AUTHOR (DRIVER_AUTHOR); |
MODULE_LICENSE ("GPL"); |
static int __init init (void) |
/*static*/ int __init ehci_hcd_init (void) |
{ |
if (usb_disabled()) |
return -ENODEV; |
1023,10 → 1025,10 |
return pci_module_init (&ehci_pci_driver); |
} |
module_init (init); |
//module_init (init); |
static void __exit cleanup (void) |
/*static*/ void __exit ehci_hcd_cleanup (void) |
{ |
pci_unregister_driver (&ehci_pci_driver); |
} |
module_exit (cleanup); |
//module_exit (cleanup); |
/shark/trunk/drivers/usb/host/ehci.h |
---|
134,7 → 134,7 |
t = EHCI_SHRINK_JIFFIES; |
break; |
} |
t += jiffies; |
t += jiffies26; |
// all timings except IAA watchdog can be overridden. |
// async queue SHRINK often precedes IAA. while it's ready |
// to go OFF neither can matter, and afterwards the IO |
/shark/trunk/drivers/usb/host/ohci-hcd.c |
---|
1,691 → 1,691 |
/* |
* OHCI HCD (Host Controller Driver) for USB. |
* |
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at> |
* (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net> |
* |
* [ Initialisation is based on Linus' ] |
* [ uhci code and gregs ohci fragments ] |
* [ (C) Copyright 1999 Linus Torvalds ] |
* [ (C) Copyright 1999 Gregory P. Smith] |
* |
* |
* OHCI is the main "non-Intel/VIA" standard for USB 1.1 host controller |
* interfaces (though some non-x86 Intel chips use it). It supports |
 * smarter hardware than UHCI.  A download link for the spec is available |
 * through the http://www.usb.org website. |
* |
* History: |
* |
* 2003/02/24 show registers in sysfs (Kevin Brosius) |
* |
* 2002/09/03 get rid of ed hashtables, rework periodic scheduling and |
* bandwidth accounting; if debugging, show schedules in driverfs |
* 2002/07/19 fixes to management of ED and schedule state. |
* 2002/06/09 SA-1111 support (Christopher Hoover) |
* 2002/06/01 remember frame when HC won't see EDs any more; use that info |
* to fix urb unlink races caused by interrupt latency assumptions; |
* minor ED field and function naming updates |
* 2002/01/18 package as a patch for 2.5.3; this should match the |
* 2.4.17 kernel modulo some bugs being fixed. |
* |
* 2001/10/18 merge pmac cleanup (Benjamin Herrenschmidt) and bugfixes |
* from post-2.4.5 patches. |
* 2001/09/20 URB_ZERO_PACKET support; hcca_dma portability, OPTi warning |
* 2001/09/07 match PCI PM changes, errnos from Linus' tree |
* 2001/05/05 fork 2.4.5 version into "hcd" framework, cleanup, simplify; |
* pbook pci quirks gone (please fix pbook pci sw!) (db) |
* |
* 2001/04/08 Identify version on module load (gb) |
* 2001/03/24 td/ed hashing to remove bus_to_virt (Steve Longerbeam); |
pci_map_single (db) |
* 2001/03/21 td and dev/ed allocation uses new pci_pool API (db) |
* 2001/03/07 hcca allocation uses pci_alloc_consistent (Steve Longerbeam) |
* |
* 2000/09/26 fixed races in removing the private portion of the urb |
* 2000/09/07 disable bulk and control lists when unlinking the last |
* endpoint descriptor in order to avoid unrecoverable errors on |
* the Lucent chips. (rwc@sgi) |
* 2000/08/29 use bandwidth claiming hooks (thanks Randy!), fix some |
* urb unlink probs, indentation fixes |
* 2000/08/11 various oops fixes mostly affecting iso and cleanup from |
* device unplugs. |
* 2000/06/28 use PCI hotplug framework, for better power management |
* and for Cardbus support (David Brownell) |
* 2000/earlier: fixes for NEC/Lucent chips; suspend/resume handling |
* when the controller loses power; handle UE; cleanup; ... |
* |
* v5.2 1999/12/07 URB 3rd preview, |
* v5.1 1999/11/30 URB 2nd preview, cpia, (usb-scsi) |
* v5.0 1999/11/22 URB Technical preview, Paul Mackerras powerbook susp/resume |
* i386: HUB, Keyboard, Mouse, Printer |
* |
* v4.3 1999/10/27 multiple HCs, bulk_request |
* v4.2 1999/09/05 ISO API alpha, new dev alloc, neg Error-codes |
* v4.1 1999/08/27 Randy Dunlap's - ISO API first impl. |
* v4.0 1999/08/18 |
* v3.0 1999/06/25 |
* v2.1 1999/05/09 code clean up |
* v2.0 1999/05/04 |
* v1.0 1999/04/27 initial release |
* |
* This file is licenced under the GPL. |
*/ |
#include <linuxcomp.h> |
#include <linux/config.h> |
#ifdef CONFIG_USB_DEBUG |
# define DEBUG |
#else |
# undef DEBUG |
#endif |
#include <linux/module.h> |
#include <linux/pci.h> |
#include <linux/kernel.h> |
#include <linux/delay.h> |
#include <linux/ioport.h> |
#include <linux/sched.h> |
#include <linux/slab.h> |
#include <linux/smp_lock.h> |
#include <linux/errno.h> |
#include <linux/init.h> |
#include <linux/timer.h> |
#include <linux/list.h> |
#include <linux/interrupt.h> /* for in_interrupt () */ |
#include <linux/usb.h> |
#include "../core/hcd.h" |
#include <asm/io.h> |
#include <asm/irq.h> |
#include <asm/system.h> |
#include <asm/unaligned.h> |
#include <asm/byteorder.h> |
#define DRIVER_VERSION "2003 Oct 13" |
#define DRIVER_AUTHOR "Roman Weissgaerber, David Brownell" |
#define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver" |
/*-------------------------------------------------------------------------*/ |
//#define OHCI_VERBOSE_DEBUG /* not always helpful */ |
/* For initializing controller (mask in an HCFS mode too) */ |
#define OHCI_CONTROL_INIT \ |
(OHCI_CTRL_CBSR & 0x3) | OHCI_CTRL_IE | OHCI_CTRL_PLE |
#define OHCI_UNLINK_TIMEOUT (HZ / 10) |
/*-------------------------------------------------------------------------*/ |
static const char hcd_name [] = "ohci_hcd"; |
#include "ohci.h" |
/* mark the controller halted; subsequent HCD_IS_RUNNING() checks fail,
 * so no new work is submitted to dead/stopping hardware
 */
static inline void disable (struct ohci_hcd *ohci)
{
	ohci->hcd.state = USB_STATE_HALT;
}
#include "ohci-hub.c" |
#include "ohci-dbg.c" |
#include "ohci-mem.c" |
#include "ohci-q.c" |
/*-------------------------------------------------------------------------*/ |
/*
 * queue up an urb for anything except the root hub
 *
 * Returns 0 on success; -ENOMEM when ED/TD/private allocation fails,
 * -EMSGSIZE for control transfers over 4096 bytes, or -ENODEV when
 * the controller is no longer running.
 */
static int ohci_urb_enqueue (
	struct usb_hcd	*hcd,
	struct urb	*urb,
	int		mem_flags
) {
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
	struct ed	*ed;
	urb_priv_t	*urb_priv;
	unsigned int	pipe = urb->pipe;
	int		i, size = 0;
	unsigned long	flags;
	int		retval = 0;

#ifdef OHCI_VERBOSE_DEBUG
	urb_print (urb, "SUB", usb_pipein (pipe));
#endif

	/* every endpoint has a ed, locate and maybe (re)initialize it */
	if (! (ed = ed_get (ohci, urb->dev, pipe, urb->interval)))
		return -ENOMEM;

	/* for the private part of the URB we need the number of TDs (size) */
	switch (ed->type) {
	case PIPE_CONTROL:
		/* td_submit_urb() doesn't yet handle these */
		if (urb->transfer_buffer_length > 4096)
			return -EMSGSIZE;
		/* 1 TD for setup, 1 for ACK, plus ... */
		size = 2;
		/* FALLTHROUGH */
	// case PIPE_INTERRUPT:
	// case PIPE_BULK:
	default:
		/* one TD for every 4096 Bytes (can be upto 8K) */
		size += urb->transfer_buffer_length / 4096;
		/* ... and for any remaining bytes ... */
		if ((urb->transfer_buffer_length % 4096) != 0)
			size++;
		/* ... and maybe a zero length packet to wrap it up */
		if (size == 0)
			size++;
		else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
			&& (urb->transfer_buffer_length
				% usb_maxpacket (urb->dev, pipe,
					usb_pipeout (pipe))) == 0)
			size++;
		break;
	case PIPE_ISOCHRONOUS:	/* number of packets from URB */
		size = urb->number_of_packets;
		break;
	}

	/* allocate the private part of the URB */
	urb_priv = kmalloc (sizeof (urb_priv_t) + size * sizeof (struct td *),
			mem_flags);
	if (!urb_priv)
		return -ENOMEM;
	memset (urb_priv, 0, sizeof (urb_priv_t) + size * sizeof (struct td *));

	/* fill the private part of the URB */
	urb_priv->length = size;
	urb_priv->ed = ed;

	/* allocate the TDs (deferring hash chain updates) */
	for (i = 0; i < size; i++) {
		urb_priv->td [i] = td_alloc (ohci, mem_flags);
		if (!urb_priv->td [i]) {
			/* record how many did succeed, so they get freed */
			urb_priv->length = i;
			urb_free_priv (ohci, urb_priv);
			return -ENOMEM;
		}
	}

	spin_lock_irqsave (&ohci->lock, flags);

	/* don't submit to a dead HC */
	if (!HCD_IS_RUNNING(ohci->hcd.state)) {
		retval = -ENODEV;
		goto fail;
	}

	/* schedule the ed if needed */
	if (ed->state == ED_IDLE) {
		retval = ed_schedule (ohci, ed);
		if (retval < 0)
			goto fail;
		if (ed->type == PIPE_ISOCHRONOUS) {
			u16	frame = le16_to_cpu (ohci->hcca->frame_no);

			/* delay a few frames before the first TD */
			frame += max_t (u16, 8, ed->interval);
			frame &= ~(ed->interval - 1);
			frame |= ed->branch;
			urb->start_frame = frame;

			/* yes, only URB_ISO_ASAP is supported, and
			 * urb->start_frame is never used as input.
			 */
		}
	} else if (ed->type == PIPE_ISOCHRONOUS)
		urb->start_frame = ed->last_iso + ed->interval;

	/* fill the TDs and link them to the ed; and
	 * enable that part of the schedule, if needed
	 * and update count of queued periodic urbs
	 */
	urb->hcpriv = urb_priv;
	td_submit_urb (ohci, urb);

fail:
	if (retval)
		urb_free_priv (ohci, urb_priv);
	spin_unlock_irqrestore (&ohci->lock, flags);
	return retval;
}
/*
 * decouple the URB from the HC queues (TDs, urb_priv); it's
 * already marked using urb->status.  reporting is always done
 * asynchronously, and we might be dealing with an urb that's
 * partially transferred, or an ED with other urbs being unlinked.
 *
 * Always returns 0: the unlink request itself is accepted here, and
 * completion is reported later.
 */
static int ohci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
{
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
	unsigned long		flags;

#ifdef OHCI_VERBOSE_DEBUG
	urb_print (urb, "UNLINK", 1);
#endif

	spin_lock_irqsave (&ohci->lock, flags);
	if (HCD_IS_RUNNING(ohci->hcd.state)) {
		urb_priv_t  *urb_priv;

		/* Unless an IRQ completed the unlink while it was being
		 * handed to us, flag it for unlink and giveback, and force
		 * some upcoming INTR_SF to call finish_unlinks()
		 */
		urb_priv = urb->hcpriv;
		if (urb_priv) {
			if (urb_priv->ed->state == ED_OPER)
				start_urb_unlink (ohci, urb_priv->ed);
		}
	} else {
		/*
		 * with HC dead, we won't respect hc queue pointers
		 * any more ... just clean up every urb's memory.
		 */
		if (urb->hcpriv) {
			/* drop the lock around giveback */
			spin_unlock (&ohci->lock);
			finish_urb (ohci, urb, NULL);
			spin_lock (&ohci->lock);
		}
	}
	spin_unlock_irqrestore (&ohci->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/ |
/* frees config/altsetting state for endpoints,
 * including ED memory, dummy TD, and bulk/intr data toggle
 *
 * Busy-waits (sleeping a tick per retry) while the hardware is still
 * unlinking the ED; if the ED can't be reclaimed it is leaked with an
 * error message rather than corrupting HC queues.
 */
static void
ohci_endpoint_disable (struct usb_hcd *hcd, struct hcd_dev *dev, int ep)
{
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
	int			epnum = ep & USB_ENDPOINT_NUMBER_MASK;
	unsigned long		flags;
	struct ed		*ed;
	unsigned		limit = 1000;

	/* ASSERT:  any requests/urbs are being unlinked */
	/* ASSERT:  nobody can be submitting urbs for this any more */

	/* compute the dev->ep [] index: number doubled, low bit set
	 * for non-control OUT endpoints
	 */
	epnum <<= 1;
	if (epnum != 0 && !(ep & USB_DIR_IN))
		epnum |= 1;

rescan:
	spin_lock_irqsave (&ohci->lock, flags);
	ed = dev->ep [epnum];
	if (!ed)
		goto done;

	/* dead controller: treat the ED as already unlinked */
	if (!HCD_IS_RUNNING (ohci->hcd.state))
		ed->state = ED_IDLE;
	switch (ed->state) {
	case ED_UNLINK:		/* wait for hw to finish? */
		/* major IRQ delivery trouble loses INTR_SF too... */
		WARN_ON (limit-- == 0);
		spin_unlock_irqrestore (&ohci->lock, flags);
		set_current_state (TASK_UNINTERRUPTIBLE);
		schedule_timeout (1);
		goto rescan;
	case ED_IDLE:		/* fully unlinked */
		if (list_empty (&ed->td_list)) {
			td_free (ohci, ed->dummy);
			ed_free (ohci, ed);
			break;
		}
		/* else FALL THROUGH */
	default:
		/* caller was supposed to have unlinked any requests;
		 * that's not our job.  can't recover; must leak ed.
		 */
		ohci_err (ohci, "leak ed %p (#%d) state %d%s\n",
			ed, epnum, ed->state,
			list_empty (&ed->td_list) ? "" : " (has tds)");
		td_free (ohci, ed->dummy);
		break;
	}
	dev->ep [epnum] = 0;
done:
	spin_unlock_irqrestore (&ohci->lock, flags);
	return;
}
static int ohci_get_frame (struct usb_hcd *hcd) |
{ |
struct ohci_hcd *ohci = hcd_to_ohci (hcd); |
return le16_to_cpu (ohci->hcca->frame_no); |
} |
/*-------------------------------------------------------------------------* |
* HC functions |
*-------------------------------------------------------------------------*/ |
/* reset the HC and BUS
 *
 * Returns 0 on success, -1 when BIOS/SMM handover or the controller
 * reset times out.  On return the HC is in the SUSPEND functional
 * state with interrupts disabled.
 */
static int hc_reset (struct ohci_hcd *ohci)
{
	u32 temp;

	/* SMM owns the HC?  not for long!
	 * On PA-RISC, PDC can leave IR set incorrectly; ignore it there.
	 */
#ifndef __hppa__
	if (readl (&ohci->regs->control) & OHCI_CTRL_IR) {
		ohci_dbg (ohci, "USB HC TakeOver from BIOS/SMM\n");

		/* this timeout is arbitrary.  we make it long, so systems
		 * depending on usb keyboards may be usable even if the
		 * BIOS/SMM code seems pretty broken.
		 */
		temp = 500;	/* arbitrary: five seconds */
		writel (OHCI_INTR_OC, &ohci->regs->intrenable);
		writel (OHCI_OCR, &ohci->regs->cmdstatus);
		while (readl (&ohci->regs->control) & OHCI_CTRL_IR) {
			wait_ms (10);
			if (--temp == 0) {
				ohci_err (ohci, "USB HC TakeOver failed!\n");
				return -1;
			}
		}
	}
#endif

	/* Disable HC interrupts */
	writel (OHCI_INTR_MIE, &ohci->regs->intrdisable);

	ohci_dbg (ohci, "reset, control = 0x%x\n",
		readl (&ohci->regs->control));

	/* Reset USB (needed by some controllers); RemoteWakeupConnected
	 * saved if boot firmware (BIOS/SMM/...) told us it's connected
	 */
	ohci->hc_control = readl (&ohci->regs->control);
	ohci->hc_control &= OHCI_CTRL_RWC;	/* hcfs 0 = RESET */
	writel (ohci->hc_control, &ohci->regs->control);
	// flush those pci writes
	(void) readl (&ohci->regs->control);
	wait_ms (50);

	/* HC Reset requires max 10 us delay */
	writel (OHCI_HCR, &ohci->regs->cmdstatus);
	temp = 30;	/* ... allow extra time */
	while ((readl (&ohci->regs->cmdstatus) & OHCI_HCR) != 0) {
		if (--temp == 0) {
			ohci_err (ohci, "USB HC reset timed out!\n");
			return -1;
		}
		udelay (1);
	}

	/* now we're in the SUSPEND state ... must go OPERATIONAL
	 * within 2msec else HC enters RESUME
	 *
	 * ... but some hardware won't init fmInterval "by the book"
	 * (SiS, OPTi ...), so reset again instead.  SiS doesn't need
	 * this if we write fmInterval after we're OPERATIONAL.
	 */
	writel (ohci->hc_control, &ohci->regs->control);
	// flush those pci writes
	(void) readl (&ohci->regs->control);

	return 0;
}
/*-------------------------------------------------------------------------*/ |
#define FI 0x2edf /* 12000 bits per frame (-1) */ |
#define LSTHRESH 0x628 /* lowspeed bit threshold */ |
/* Start an OHCI controller, set the BUS operational
 * enable interrupts
 * connect the virtual root hub
 *
 * Returns 0, -EOVERFLOW when the chip rejects the fmInterval /
 * periodicstart init values, -ENOMEM / -ENODEV when root hub setup
 * fails (the controller is shut back down in those cases).
 */
static int hc_start (struct ohci_hcd *ohci)
{
	u32			mask, tmp;
	struct usb_device	*udev;
	struct usb_bus		*bus;

	spin_lock_init (&ohci->lock);
	disable (ohci);

	/* Tell the controller where the control and bulk lists are
	 * The lists are empty now. */
	writel (0, &ohci->regs->ed_controlhead);
	writel (0, &ohci->regs->ed_bulkhead);

	/* a reset clears this */
	writel ((u32) ohci->hcca_dma, &ohci->regs->hcca);

	/* force default fmInterval (we won't adjust it); init thresholds
	 * for last FS and LS packets, reserve 90% for periodic.
	 */
	writel ((((6 * (FI - 210)) / 7) << 16) | FI, &ohci->regs->fminterval);
	writel (((9 * FI) / 10) & 0x3fff, &ohci->regs->periodicstart);
	writel (LSTHRESH, &ohci->regs->lsthresh);

	/* some OHCI implementations are finicky about how they init.
	 * bogus values here mean not even enumeration could work.
	 */
	if ((readl (&ohci->regs->fminterval) & 0x3fff0000) == 0
			|| !readl (&ohci->regs->periodicstart)) {
		ohci_err (ohci, "init err\n");
		return -EOVERFLOW;
	}

	/* start controller operations */
	ohci->hc_control &= OHCI_CTRL_RWC;
	ohci->hc_control |= OHCI_CONTROL_INIT | OHCI_USB_OPER;
	writel (ohci->hc_control, &ohci->regs->control);
	ohci->hcd.state = USB_STATE_RUNNING;

	/* Choose the interrupts we care about now, others later on demand */
	mask = OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_WDH;
	writel (mask, &ohci->regs->intrstatus);
	writel (mask, &ohci->regs->intrenable);

	/* handle root hub init quirks ... */
	tmp = roothub_a (ohci);
	tmp &= ~(RH_A_PSM | RH_A_OCPM);
	if (ohci->flags & OHCI_QUIRK_SUPERIO) {
		/* NSC 87560 and maybe others */
		tmp |= RH_A_NOCP;
		tmp &= ~(RH_A_POTPGT | RH_A_NPS);
	} else {
		/* hub power always on; required for AMD-756 and some
		 * Mac platforms, use this mode everywhere by default
		 */
		tmp |= RH_A_NPS;
	}
	writel (tmp, &ohci->regs->roothub.a);
	writel (RH_HS_LPSC, &ohci->regs->roothub.status);
	writel (0, &ohci->regs->roothub.b);
	// flush those pci writes
	(void) readl (&ohci->regs->control);

	// POTPGT delay is bits 24-31, in 2 ms units.
	mdelay ((roothub_a (ohci) >> 23) & 0x1fe);

	/* connect the virtual root hub */
	bus = hcd_to_bus (&ohci->hcd);
	bus->root_hub = udev = usb_alloc_dev (NULL, bus);
	ohci->hcd.state = USB_STATE_RUNNING;
	if (!udev) {
		/* no root hub device: take the bus back out of OPERATIONAL */
		disable (ohci);
		ohci->hc_control &= ~OHCI_CTRL_HCFS;
		writel (ohci->hc_control, &ohci->regs->control);
		return -ENOMEM;
	}

	udev->speed = USB_SPEED_FULL;
	if (hcd_register_root (&ohci->hcd) != 0) {
		usb_put_dev (udev);
		bus->root_hub = NULL;
		disable (ohci);
		ohci->hc_control &= ~OHCI_CTRL_HCFS;
		writel (ohci->hc_control, &ohci->regs->control);
		return -ENODEV;
	}

	return 0;
}
/*-------------------------------------------------------------------------*/ |
/* an interrupt happens: classify the cause, process the done list,
 * finish pending unlinks, then ack and re-enable interrupts
 */
static void ohci_irq (struct usb_hcd *hcd, struct pt_regs *ptregs)
{
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
	struct ohci_regs	*regs = ohci->regs;
	int			ints;

	/* we can eliminate a (slow) readl() if _only_ WDH caused this irq */
	if ((ohci->hcca->done_head != 0)
			&& ! (le32_to_cpup (&ohci->hcca->done_head) & 0x01)) {
		ints = OHCI_INTR_WDH;

	/* cardbus/... hardware gone before remove() */
	} else if ((ints = readl (&regs->intrstatus)) == ~(u32)0) {
		disable (ohci);
		ohci_dbg (ohci, "device removed!\n");
		return;

	/* interrupt for some other device? */
	} else if ((ints &= readl (&regs->intrenable)) == 0) {
		return;
	}

	if (ints & OHCI_INTR_UE) {
		disable (ohci);
		ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n");
		// e.g. due to PCI Master/Target Abort
		ohci_dump (ohci, 1);
		hc_reset (ohci);
	}

	if (ints & OHCI_INTR_WDH) {
		/* mask WDH while walking the done list */
		if (HCD_IS_RUNNING(hcd->state))
			writel (OHCI_INTR_WDH, &regs->intrdisable);
		dl_done_list (ohci, dl_reverse_done_list (ohci), ptregs);
		if (HCD_IS_RUNNING(hcd->state))
			writel (OHCI_INTR_WDH, &regs->intrenable);
	}

	/* could track INTR_SO to reduce available PCI/... bandwidth */

	/* handle any pending URB/ED unlinks, leaving INTR_SF enabled
	 * when there's still unlinking to be done (next frame).
	 */
	spin_lock (&ohci->lock);
	if (ohci->ed_rm_list)
		finish_unlinks (ohci, le16_to_cpu (ohci->hcca->frame_no),
				ptregs);
	if ((ints & OHCI_INTR_SF) != 0 && !ohci->ed_rm_list
			&& HCD_IS_RUNNING(ohci->hcd.state))
		writel (OHCI_INTR_SF, &regs->intrdisable);
	spin_unlock (&ohci->lock);

	if (HCD_IS_RUNNING(ohci->hcd.state)) {
		/* ack what we handled; re-enable master interrupt */
		writel (ints, &regs->intrstatus);
		writel (OHCI_INTR_MIE, &regs->intrenable);
		// flush those pci writes
		(void) readl (&ohci->regs->control);
	}
}
/*-------------------------------------------------------------------------*/ |
/* stop the controller and release the HC-shared memory (pools + HCCA) */
static void ohci_stop (struct usb_hcd *hcd)
{
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);

	ohci_dbg (ohci, "stop %s controller (state 0x%02x)\n",
		hcfs2string (ohci->hc_control & OHCI_CTRL_HCFS),
		ohci->hcd.state);
	ohci_dump (ohci, 1);

	/* quiesce the hardware before freeing memory it may still read */
	if (HCD_IS_RUNNING(ohci->hcd.state))
		hc_reset (ohci);

	remove_debug_files (ohci);
	ohci_mem_cleanup (ohci);
	if (ohci->hcca) {
		pci_free_consistent (ohci->hcd.pdev, sizeof *ohci->hcca,
			ohci->hcca, ohci->hcca_dma);
		ohci->hcca = NULL;
		ohci->hcca_dma = 0;
	}
}
/*-------------------------------------------------------------------------*/ |
// FIXME: this restart logic should be generic, |
// and handle full hcd state cleanup |
/* controller died; cleanup debris, then restart */ |
/* must not be called from interrupt context */ |
#ifdef CONFIG_PM |
/* controller died; disconnect the root hub, empty all HC schedules,
 * then reset and restart.  Returns 0 or the hc_reset/hc_start errno.
 */
static int hc_restart (struct ohci_hcd *ohci)
{
	int temp;
	int i;

	disable (ohci);
	if (hcd_to_bus (&ohci->hcd)->root_hub)
		usb_disconnect (&hcd_to_bus (&ohci->hcd)->root_hub);

	/* empty the interrupt branches */
	for (i = 0; i < NUM_INTS; i++) ohci->load [i] = 0;
	for (i = 0; i < NUM_INTS; i++) ohci->hcca->int_table [i] = 0;

	/* no EDs to remove */
	ohci->ed_rm_list = NULL;

	/* empty control and bulk lists */
	ohci->ed_controltail = NULL;
	ohci->ed_bulktail = NULL;

	if ((temp = hc_reset (ohci)) < 0 || (temp = hc_start (ohci)) < 0) {
		ohci_err (ohci, "can't restart, %d\n", temp);
		return temp;
	} else
		ohci_dbg (ohci, "restart complete\n");
	return 0;
}
#endif |
/*-------------------------------------------------------------------------*/ |
#define DRIVER_INFO DRIVER_VERSION " " DRIVER_DESC |
MODULE_AUTHOR (DRIVER_AUTHOR); |
MODULE_DESCRIPTION (DRIVER_INFO); |
MODULE_LICENSE ("GPL"); |
#ifdef CONFIG_PCI |
#include "ohci-pci.c" |
#endif |
#ifdef CONFIG_SA1111 |
#include "ohci-sa1111.c" |
#endif |
#if !(defined(CONFIG_PCI) || defined(CONFIG_SA1111)) |
#error "missing bus glue for ohci-hcd" |
#endif |
/* |
* OHCI HCD (Host Controller Driver) for USB. |
* |
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at> |
* (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net> |
* |
* [ Initialisation is based on Linus' ] |
* [ uhci code and gregs ohci fragments ] |
* [ (C) Copyright 1999 Linus Torvalds ] |
* [ (C) Copyright 1999 Gregory P. Smith] |
* |
* |
* OHCI is the main "non-Intel/VIA" standard for USB 1.1 host controller |
* interfaces (though some non-x86 Intel chips use it). It supports |
 * smarter hardware than UHCI.  A download link for the spec is available |
 * through the http://www.usb.org website. |
* |
* History: |
* |
* 2003/02/24 show registers in sysfs (Kevin Brosius) |
* |
* 2002/09/03 get rid of ed hashtables, rework periodic scheduling and |
* bandwidth accounting; if debugging, show schedules in driverfs |
* 2002/07/19 fixes to management of ED and schedule state. |
* 2002/06/09 SA-1111 support (Christopher Hoover) |
* 2002/06/01 remember frame when HC won't see EDs any more; use that info |
* to fix urb unlink races caused by interrupt latency assumptions; |
* minor ED field and function naming updates |
* 2002/01/18 package as a patch for 2.5.3; this should match the |
* 2.4.17 kernel modulo some bugs being fixed. |
* |
* 2001/10/18 merge pmac cleanup (Benjamin Herrenschmidt) and bugfixes |
* from post-2.4.5 patches. |
* 2001/09/20 URB_ZERO_PACKET support; hcca_dma portability, OPTi warning |
* 2001/09/07 match PCI PM changes, errnos from Linus' tree |
* 2001/05/05 fork 2.4.5 version into "hcd" framework, cleanup, simplify; |
* pbook pci quirks gone (please fix pbook pci sw!) (db) |
* |
* 2001/04/08 Identify version on module load (gb) |
* 2001/03/24 td/ed hashing to remove bus_to_virt (Steve Longerbeam); |
pci_map_single (db) |
* 2001/03/21 td and dev/ed allocation uses new pci_pool API (db) |
* 2001/03/07 hcca allocation uses pci_alloc_consistent_usb (Steve Longerbeam) |
* |
* 2000/09/26 fixed races in removing the private portion of the urb |
* 2000/09/07 disable bulk and control lists when unlinking the last |
* endpoint descriptor in order to avoid unrecoverable errors on |
* the Lucent chips. (rwc@sgi) |
* 2000/08/29 use bandwidth claiming hooks (thanks Randy!), fix some |
* urb unlink probs, indentation fixes |
* 2000/08/11 various oops fixes mostly affecting iso and cleanup from |
* device unplugs. |
* 2000/06/28 use PCI hotplug framework, for better power management |
* and for Cardbus support (David Brownell) |
* 2000/earlier: fixes for NEC/Lucent chips; suspend/resume handling |
* when the controller loses power; handle UE; cleanup; ... |
* |
* v5.2 1999/12/07 URB 3rd preview, |
* v5.1 1999/11/30 URB 2nd preview, cpia, (usb-scsi) |
* v5.0 1999/11/22 URB Technical preview, Paul Mackerras powerbook susp/resume |
* i386: HUB, Keyboard, Mouse, Printer |
* |
* v4.3 1999/10/27 multiple HCs, bulk_request |
* v4.2 1999/09/05 ISO API alpha, new dev alloc, neg Error-codes |
* v4.1 1999/08/27 Randy Dunlap's - ISO API first impl. |
* v4.0 1999/08/18 |
* v3.0 1999/06/25 |
* v2.1 1999/05/09 code clean up |
* v2.0 1999/05/04 |
* v1.0 1999/04/27 initial release |
* |
* This file is licenced under the GPL. |
*/ |
#include <linuxcomp.h> |
#include <linux/config.h> |
#ifdef CONFIG_USB_DEBUG |
# define DEBUG |
#else |
# undef DEBUG |
#endif |
#include <linux/module.h> |
#include <linux/pci.h> |
#include <linux/kernel.h> |
#include <linux/delay.h> |
#include <linux/ioport.h> |
#include <linux/sched.h> |
#include <linux/slab.h> |
#include <linux/smp_lock.h> |
#include <linux/errno.h> |
#include <linux/init.h> |
#include <linux/timer.h> |
#include <linux/list.h> |
#include <linux/interrupt.h> /* for in_interrupt () */ |
#include <linux/usb.h> |
#include "../core/hcd.h" |
#include <asm/io.h> |
#include <asm/irq.h> |
#include <asm/system.h> |
#include <asm/unaligned.h> |
#include <asm/byteorder.h> |
#define DRIVER_VERSION "2003 Oct 13" |
#define DRIVER_AUTHOR "Roman Weissgaerber, David Brownell" |
#define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver" |
/*-------------------------------------------------------------------------*/ |
//#define OHCI_VERBOSE_DEBUG /* not always helpful */ |
/* For initializing controller (mask in an HCFS mode too) */ |
#define OHCI_CONTROL_INIT \ |
(OHCI_CTRL_CBSR & 0x3) | OHCI_CTRL_IE | OHCI_CTRL_PLE |
#define OHCI_UNLINK_TIMEOUT (HZ / 10) |
/*-------------------------------------------------------------------------*/ |
static const char hcd_name [] = "ohci_hcd"; |
#include "ohci.h" |
/* mark the controller halted, so the generic HCD layer stops using it */
static inline void disable (struct ohci_hcd *ohci)
{
	ohci->hcd.state = USB_STATE_HALT;
}
#include "ohci-hub.c" |
#include "ohci-dbg.c" |
#include "ohci-mem.c" |
#include "ohci-q.c" |
/*-------------------------------------------------------------------------*/ |
/*
 * ohci_urb_enqueue - queue up an urb for anything except the root hub
 * @hcd: generic hcd state; converted to ohci-specific state below
 * @urb: the request; urb->pipe selects the endpoint
 * @mem_flags: allocation flags for the per-urb private bookkeeping
 *
 * Counts the TDs the transfer needs, allocates them together with the
 * urb_priv record, then (under ohci->lock) schedules the endpoint's ED
 * if idle and hands the TDs to td_submit_urb().  Returns 0 or -errno.
 */
static int ohci_urb_enqueue (
	struct usb_hcd	*hcd,
	struct urb	*urb,
	int		mem_flags
) {
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
	struct ed	*ed;
	urb_priv_t	*urb_priv;
	unsigned int	pipe = urb->pipe;
	int		i, size = 0;
	unsigned long	flags;
	int		retval = 0;

#ifdef OHCI_VERBOSE_DEBUG
	urb_print (urb, "SUB", usb_pipein (pipe));
#endif

	/* every endpoint has a ed, locate and maybe (re)initialize it */
	if (! (ed = ed_get (ohci, urb->dev, pipe, urb->interval)))
		return -ENOMEM;

	/* for the private part of the URB we need the number of TDs (size) */
	switch (ed->type) {
	case PIPE_CONTROL:
		/* td_submit_urb() doesn't yet handle these */
		if (urb->transfer_buffer_length > 4096)
			return -EMSGSIZE;

		/* 1 TD for setup, 1 for ACK, plus ... */
		size = 2;
		/* FALLTHROUGH */
	// case PIPE_INTERRUPT:
	// case PIPE_BULK:
	default:
		/* one TD for every 4096 Bytes (can be up to 8K) */
		size += urb->transfer_buffer_length / 4096;
		/* ... and for any remaining bytes ... */
		if ((urb->transfer_buffer_length % 4096) != 0)
			size++;
		/* ... and maybe a zero length packet to wrap it up */
		if (size == 0)
			size++;
		else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
			&& (urb->transfer_buffer_length
				% usb_maxpacket (urb->dev, pipe,
					usb_pipeout (pipe))) == 0)
			size++;
		break;
	case PIPE_ISOCHRONOUS:	/* number of packets from URB */
		size = urb->number_of_packets;
		break;
	}

	/* allocate the private part of the URB (flexible td[] tail) */
	urb_priv = kmalloc (sizeof (urb_priv_t) + size * sizeof (struct td *),
			mem_flags);
	if (!urb_priv)
		return -ENOMEM;
	memset (urb_priv, 0, sizeof (urb_priv_t) + size * sizeof (struct td *));

	/* fill the private part of the URB */
	urb_priv->length = size;
	urb_priv->ed = ed;

	/* allocate the TDs (deferring hash chain updates) */
	for (i = 0; i < size; i++) {
		urb_priv->td [i] = td_alloc (ohci, mem_flags);
		if (!urb_priv->td [i]) {
			/* shrink length so urb_free_priv only frees
			 * the TDs actually allocated so far */
			urb_priv->length = i;
			urb_free_priv (ohci, urb_priv);
			return -ENOMEM;
		}
	}

	spin_lock_irqsave (&ohci->lock, flags);

	/* don't submit to a dead HC */
	if (!HCD_IS_RUNNING(ohci->hcd.state)) {
		retval = -ENODEV;
		goto fail;
	}

	/* schedule the ed if needed */
	if (ed->state == ED_IDLE) {
		retval = ed_schedule (ohci, ed);
		if (retval < 0)
			goto fail;
		if (ed->type == PIPE_ISOCHRONOUS) {
			u16	frame = le16_to_cpu (ohci->hcca->frame_no);

			/* delay a few frames before the first TD, then
			 * align to the ED's interval/branch slot */
			frame += max_t (u16, 8, ed->interval);
			frame &= ~(ed->interval - 1);
			frame |= ed->branch;
			urb->start_frame = frame;

			/* yes, only URB_ISO_ASAP is supported, and
			 * urb->start_frame is never used as input.
			 */
		}
	} else if (ed->type == PIPE_ISOCHRONOUS)
		urb->start_frame = ed->last_iso + ed->interval;

	/* fill the TDs and link them to the ed; and
	 * enable that part of the schedule, if needed
	 * and update count of queued periodic urbs
	 */
	urb->hcpriv = urb_priv;
	td_submit_urb (ohci, urb);

fail:
	if (retval)
		urb_free_priv (ohci, urb_priv);
	spin_unlock_irqrestore (&ohci->lock, flags);
	return retval;
}
/*
 * decouple the URB from the HC queues (TDs, urb_priv); it's
 * already marked using urb->status. reporting is always done
 * asynchronously, and we might be dealing with an urb that's
 * partially transferred, or an ED with other urbs being unlinked.
 */
static int ohci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
{
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
	unsigned long	flags;

#ifdef OHCI_VERBOSE_DEBUG
	urb_print (urb, "UNLINK", 1);
#endif

	spin_lock_irqsave (&ohci->lock, flags);
	if (HCD_IS_RUNNING(ohci->hcd.state)) {
		urb_priv_t *urb_priv;

		/* Unless an IRQ completed the unlink while it was being
		 * handed to us, flag it for unlink and giveback, and force
		 * some upcoming INTR_SF to call finish_unlinks()
		 */
		urb_priv = urb->hcpriv;
		if (urb_priv) {
			if (urb_priv->ed->state == ED_OPER)
				start_urb_unlink (ohci, urb_priv->ed);
		}
	} else {
		/*
		 * with HC dead, we won't respect hc queue pointers
		 * any more ... just clean up every urb's memory.
		 *
		 * NOTE(review): the lock is dropped around finish_urb,
		 * presumably because it gives the urb back to the device
		 * driver's completion handler — confirm against finish_urb.
		 */
		if (urb->hcpriv) {
			spin_unlock (&ohci->lock);
			finish_urb (ohci, urb, NULL);
			spin_lock (&ohci->lock);
		}
	}
	spin_unlock_irqrestore (&ohci->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/ |
/* frees config/altsetting state for endpoints,
 * including ED memory, dummy TD, and bulk/intr data toggle
 *
 * May sleep (schedule_timeout) while waiting for the hardware to
 * finish unlinking the ED, so it must not be called in_interrupt.
 */
static void
ohci_endpoint_disable (struct usb_hcd *hcd, struct hcd_dev *dev, int ep)
{
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
	int		epnum = ep & USB_ENDPOINT_NUMBER_MASK;
	unsigned long	flags;
	struct ed	*ed;
	unsigned	limit = 1000;

	/* ASSERT: any requests/urbs are being unlinked */
	/* ASSERT: nobody can be submitting urbs for this any more */

	/* map (number, direction) to the dev->ep[] index: after the
	 * shift, OUT endpoints take the odd slots, IN (and ep0) even */
	epnum <<= 1;
	if (epnum != 0 && !(ep & USB_DIR_IN))
		epnum |= 1;

rescan:
	spin_lock_irqsave (&ohci->lock, flags);
	ed = dev->ep [epnum];
	if (!ed)
		goto done;

	/* a dead HC can't be unlinking anything; treat the ED as idle */
	if (!HCD_IS_RUNNING (ohci->hcd.state))
		ed->state = ED_IDLE;
	switch (ed->state) {
	case ED_UNLINK:		/* wait for hw to finish? */
		/* major IRQ delivery trouble loses INTR_SF too... */
		WARN_ON (limit-- == 0);
		spin_unlock_irqrestore (&ohci->lock, flags);
		set_current_state (TASK_UNINTERRUPTIBLE);
		schedule_timeout (1);
		goto rescan;
	case ED_IDLE:		/* fully unlinked */
		if (list_empty (&ed->td_list)) {
			td_free (ohci, ed->dummy);
			ed_free (ohci, ed);
			break;
		}
		/* else FALL THROUGH */
	default:
		/* caller was supposed to have unlinked any requests;
		 * that's not our job.  can't recover; must leak ed.
		 */
		ohci_err (ohci, "leak ed %p (#%d) state %d%s\n",
			ed, epnum, ed->state,
			list_empty (&ed->td_list) ? "" : " (has tds)");
		td_free (ohci, ed->dummy);
		break;
	}
	dev->ep [epnum] = 0;
done:
	spin_unlock_irqrestore (&ohci->lock, flags);
	return;
}
static int ohci_get_frame (struct usb_hcd *hcd) |
{ |
struct ohci_hcd *ohci = hcd_to_ohci (hcd); |
return le16_to_cpu (ohci->hcca->frame_no); |
} |
/*-------------------------------------------------------------------------* |
* HC functions |
*-------------------------------------------------------------------------*/ |
/* reset the HC and BUS: reclaim the controller from BIOS/SMM firmware
 * (unless on PA-RISC), disable interrupts, then hardware-reset the HC.
 * Returns 0 on success, -1 on takeover/reset timeout.
 */
static int hc_reset (struct ohci_hcd *ohci)
{
	u32 temp;

	/* SMM owns the HC? not for long!
	 * On PA-RISC, PDC can leave IR set incorrectly; ignore it there.
	 */
#ifndef __hppa__
	if (readl (&ohci->regs->control) & OHCI_CTRL_IR) {
		ohci_dbg (ohci, "USB HC TakeOver from BIOS/SMM\n");

		/* this timeout is arbitrary.  we make it long, so systems
		 * depending on usb keyboards may be usable even if the
		 * BIOS/SMM code seems pretty broken.
		 */
		temp = 500;	/* arbitrary: five seconds */
		writel (OHCI_INTR_OC, &ohci->regs->intrenable);
		/* request ownership change, then poll until IR clears */
		writel (OHCI_OCR, &ohci->regs->cmdstatus);
		while (readl (&ohci->regs->control) & OHCI_CTRL_IR) {
			wait_ms (10);
			if (--temp == 0) {
				ohci_err (ohci, "USB HC TakeOver failed!\n");
				return -1;
			}
		}
	}
#endif

	/* Disable HC interrupts */
	writel (OHCI_INTR_MIE, &ohci->regs->intrdisable);

	ohci_dbg (ohci, "reset, control = 0x%x\n",
		readl (&ohci->regs->control));

	/* Reset USB (needed by some controllers); RemoteWakeupConnected
	 * saved if boot firmware (BIOS/SMM/...) told us it's connected
	 */
	ohci->hc_control = readl (&ohci->regs->control);
	ohci->hc_control &= OHCI_CTRL_RWC;	/* hcfs 0 = RESET */
	writel (ohci->hc_control, &ohci->regs->control);
	// flush those pci writes
	(void) readl (&ohci->regs->control);
	wait_ms (50);

	/* HC Reset requires max 10 us delay */
	writel (OHCI_HCR, &ohci->regs->cmdstatus);
	temp = 30;	/* ... allow extra time */
	while ((readl (&ohci->regs->cmdstatus) & OHCI_HCR) != 0) {
		if (--temp == 0) {
			ohci_err (ohci, "USB HC reset timed out!\n");
			return -1;
		}
		udelay (1);
	}

	/* now we're in the SUSPEND state ... must go OPERATIONAL
	 * within 2msec else HC enters RESUME
	 *
	 * ... but some hardware won't init fmInterval "by the book"
	 * (SiS, OPTi ...), so reset again instead.  SiS doesn't need
	 * this if we write fmInterval after we're OPERATIONAL.
	 */
	writel (ohci->hc_control, &ohci->regs->control);
	// flush those pci writes
	(void) readl (&ohci->regs->control);

	return 0;
}
/*-------------------------------------------------------------------------*/ |
#define FI 0x2edf /* 12000 bits per frame (-1) */ |
#define LSTHRESH 0x628 /* lowspeed bit threshold */ |
/* Start an OHCI controller, set the BUS operational
 * enable interrupts
 * connect the virtual root hub
 *
 * Returns 0, or a negative errno if register sanity checks fail or
 * the root hub can't be allocated/registered.
 */
static int hc_start (struct ohci_hcd *ohci)
{
	u32			mask, tmp;
	struct usb_device	*udev;
	struct usb_bus		*bus;

	spin_lock_init (&ohci->lock);
	disable (ohci);

	/* Tell the controller where the control and bulk lists are
	 * The lists are empty now. */
	writel (0, &ohci->regs->ed_controlhead);
	writel (0, &ohci->regs->ed_bulkhead);

	/* a reset clears this */
	writel ((u32) ohci->hcca_dma, &ohci->regs->hcca);

	/* force default fmInterval (we won't adjust it); init thresholds
	 * for last FS and LS packets, reserve 90% for periodic.
	 */
	writel ((((6 * (FI - 210)) / 7) << 16) | FI, &ohci->regs->fminterval);
	writel (((9 * FI) / 10) & 0x3fff, &ohci->regs->periodicstart);
	writel (LSTHRESH, &ohci->regs->lsthresh);

	/* some OHCI implementations are finicky about how they init.
	 * bogus values here mean not even enumeration could work.
	 */
	if ((readl (&ohci->regs->fminterval) & 0x3fff0000) == 0
			|| !readl (&ohci->regs->periodicstart)) {
		ohci_err (ohci, "init err\n");
		return -EOVERFLOW;
	}

	/* start controller operations */
	ohci->hc_control &= OHCI_CTRL_RWC;
	ohci->hc_control |= OHCI_CONTROL_INIT | OHCI_USB_OPER;
	writel (ohci->hc_control, &ohci->regs->control);
	ohci->hcd.state = USB_STATE_RUNNING;

	/* Choose the interrupts we care about now, others later on demand */
	mask = OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_WDH;
	writel (mask, &ohci->regs->intrstatus);	/* ack stale status first */
	writel (mask, &ohci->regs->intrenable);

	/* handle root hub init quirks ... */
	tmp = roothub_a (ohci);
	tmp &= ~(RH_A_PSM | RH_A_OCPM);
	if (ohci->flags & OHCI_QUIRK_SUPERIO) {
		/* NSC 87560 and maybe others */
		tmp |= RH_A_NOCP;
		tmp &= ~(RH_A_POTPGT | RH_A_NPS);
	} else {
		/* hub power always on; required for AMD-756 and some
		 * Mac platforms, use this mode everywhere by default
		 */
		tmp |= RH_A_NPS;
	}
	writel (tmp, &ohci->regs->roothub.a);
	writel (RH_HS_LPSC, &ohci->regs->roothub.status);
	writel (0, &ohci->regs->roothub.b);
	// flush those pci writes
	(void) readl (&ohci->regs->control);

	// POTPGT delay is bits 24-31, in 2 ms units.
	mdelay ((roothub_a (ohci) >> 23) & 0x1fe);

	/* connect the virtual root hub */
	bus = hcd_to_bus (&ohci->hcd);
	bus->root_hub = udev = usb_alloc_dev (NULL, bus);
	/* NOTE(review): state was already set to RUNNING above; this
	 * second assignment looks redundant — confirm before removing */
	ohci->hcd.state = USB_STATE_RUNNING;
	if (!udev) {
		disable (ohci);
		ohci->hc_control &= ~OHCI_CTRL_HCFS;
		writel (ohci->hc_control, &ohci->regs->control);
		return -ENOMEM;
	}

	udev->speed = USB_SPEED_FULL;
	if (hcd_register_root (&ohci->hcd) != 0) {
		usb_put_dev (udev);
		bus->root_hub = NULL;
		/* undo: halt the controller again (hcfs back to RESET) */
		disable (ohci);
		ohci->hc_control &= ~OHCI_CTRL_HCFS;
		writel (ohci->hc_control, &ohci->regs->control);
		return -ENODEV;
	}

	return 0;
}
/*-------------------------------------------------------------------------*/ |
/* an interrupt happens: figure out the cause cheaply (done_head's low
 * bit clear means only WDH is pending, so the intrstatus readl can be
 * skipped), drain the done list, recover from UE, run pending unlinks.
 */
static void ohci_irq (struct usb_hcd *hcd, struct pt_regs *ptregs)
{
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
	struct ohci_regs	*regs = ohci->regs;
	int			ints;

	/* we can eliminate a (slow) readl() if _only_ WDH caused this irq */
	if ((ohci->hcca->done_head != 0)
			&& ! (le32_to_cpup (&ohci->hcca->done_head) & 0x01)) {
		ints = OHCI_INTR_WDH;

	/* cardbus/... hardware gone before remove() */
	} else if ((ints = readl (&regs->intrstatus)) == ~(u32)0) {
		disable (ohci);
		ohci_dbg (ohci, "device removed!\n");
		return;

	/* interrupt for some other device? */
	} else if ((ints &= readl (&regs->intrenable)) == 0) {
		return;
	}

	if (ints & OHCI_INTR_UE) {
		disable (ohci);
		ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n");
		// e.g. due to PCI Master/Target Abort
		ohci_dump (ohci, 1);
		hc_reset (ohci);
	}

	if (ints & OHCI_INTR_WDH) {
		/* mask WDH while walking the (reversed) done list */
		if (HCD_IS_RUNNING(hcd->state))
			writel (OHCI_INTR_WDH, &regs->intrdisable);
		dl_done_list (ohci, dl_reverse_done_list (ohci), ptregs);
		if (HCD_IS_RUNNING(hcd->state))
			writel (OHCI_INTR_WDH, &regs->intrenable);
	}

	/* could track INTR_SO to reduce available PCI/... bandwidth */

	/* handle any pending URB/ED unlinks, leaving INTR_SF enabled
	 * when there's still unlinking to be done (next frame).
	 */
	spin_lock (&ohci->lock);
	if (ohci->ed_rm_list)
		finish_unlinks (ohci, le16_to_cpu (ohci->hcca->frame_no),
				ptregs);
	if ((ints & OHCI_INTR_SF) != 0 && !ohci->ed_rm_list
			&& HCD_IS_RUNNING(ohci->hcd.state))
		writel (OHCI_INTR_SF, &regs->intrdisable);
	spin_unlock (&ohci->lock);

	if (HCD_IS_RUNNING(ohci->hcd.state)) {
		/* ack what we handled, re-enable the master interrupt */
		writel (ints, &regs->intrstatus);
		writel (OHCI_INTR_MIE, &regs->intrenable);
		// flush those pci writes
		(void) readl (&ohci->regs->control);
	}
}
/*-------------------------------------------------------------------------*/ |
/* shut the controller down and release its shared-memory state;
 * a still-running HC is reset first so it stops touching the HCCA
 * and schedules before they are freed.
 */
static void ohci_stop (struct usb_hcd *hcd)
{
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);

	ohci_dbg (ohci, "stop %s controller (state 0x%02x)\n",
		hcfs2string (ohci->hc_control & OHCI_CTRL_HCFS),
		ohci->hcd.state);
	ohci_dump (ohci, 1);

	if (HCD_IS_RUNNING(ohci->hcd.state))
		hc_reset (ohci);

	remove_debug_files (ohci);
	ohci_mem_cleanup (ohci);
	if (ohci->hcca) {
		pci_free_consistent (ohci->hcd.pdev, sizeof *ohci->hcca,
				ohci->hcca, ohci->hcca_dma);
		ohci->hcca = NULL;
		ohci->hcca_dma = 0;
	}
}
/*-------------------------------------------------------------------------*/ |
// FIXME: this restart logic should be generic, |
// and handle full hcd state cleanup |
/* controller died; cleanup debris, then restart */ |
/* must not be called from interrupt context */ |
#ifdef CONFIG_PM |
/* controller died; disconnect the root hub, wipe all schedule state,
 * then reset and restart the HC.  Returns 0 or the failing step's
 * error code.  Must not be called from interrupt context (hc_start
 * sleeps).
 */
static int hc_restart (struct ohci_hcd *ohci)
{
	int temp;
	int i;

	disable (ohci);
	/* NOTE(review): usb_disconnect here takes the *address* of the
	 * root_hub pointer — presumably the old two-star API; confirm */
	if (hcd_to_bus (&ohci->hcd)->root_hub)
		usb_disconnect (&hcd_to_bus (&ohci->hcd)->root_hub);

	/* empty the interrupt branches */
	for (i = 0; i < NUM_INTS; i++) ohci->load [i] = 0;
	for (i = 0; i < NUM_INTS; i++) ohci->hcca->int_table [i] = 0;

	/* no EDs to remove */
	ohci->ed_rm_list = NULL;

	/* empty control and bulk lists */
	ohci->ed_controltail = NULL;
	ohci->ed_bulktail = NULL;

	if ((temp = hc_reset (ohci)) < 0 || (temp = hc_start (ohci)) < 0) {
		ohci_err (ohci, "can't restart, %d\n", temp);
		return temp;
	} else
		ohci_dbg (ohci, "restart complete\n");
	return 0;
}
#endif |
/*-------------------------------------------------------------------------*/ |
#define DRIVER_INFO DRIVER_VERSION " " DRIVER_DESC |
MODULE_AUTHOR (DRIVER_AUTHOR); |
MODULE_DESCRIPTION (DRIVER_INFO); |
MODULE_LICENSE ("GPL"); |
#ifdef CONFIG_PCI |
#include "ohci-pci.c" |
#endif |
#ifdef CONFIG_SA1111 |
#include "ohci-sa1111.c" |
#endif |
#if !(defined(CONFIG_PCI) || defined(CONFIG_SA1111)) |
#error "missing bus glue for ohci-hcd" |
#endif |
/shark/trunk/drivers/usb/host/uhci-hcd.c |
---|
1,2670 → 1,2675 |
/* |
* Universal Host Controller Interface driver for USB. |
* |
* Maintainer: Johannes Erdfelt <johannes@erdfelt.com> |
* |
* (C) Copyright 1999 Linus Torvalds |
* (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com |
* (C) Copyright 1999 Randy Dunlap |
* (C) Copyright 1999 Georg Acher, acher@in.tum.de |
* (C) Copyright 1999 Deti Fliegl, deti@fliegl.de |
* (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch |
* (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at |
* (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface |
* support from usb-ohci.c by Adam Richter, adam@yggdrasil.com). |
* (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c) |
* |
* Intel documents this fairly well, and as far as I know there |
* are no royalties or anything like that, but even so there are |
* people who decided that they want to do the same thing in a |
* completely different way. |
* |
* WARNING! The USB documentation is downright evil. Most of it |
* is just crap, written by a committee. You're better off ignoring |
* most of it, the important stuff is: |
* - the low-level protocol (fairly simple but lots of small details) |
* - working around the horridness of the rest |
*/ |
#include <linuxcomp.h> |
#include <linux/config.h> |
#include <linux/module.h> |
#include <linux/pci.h> |
#include <linux/kernel.h> |
#include <linux/init.h> |
#include <linux/delay.h> |
#include <linux/ioport.h> |
#include <linux/sched.h> |
#include <linux/slab.h> |
#include <linux/smp_lock.h> |
#include <linux/errno.h> |
#include <linux/unistd.h> |
#include <linux/interrupt.h> |
#include <linux/spinlock.h> |
#include <linux/proc_fs.h> |
#ifdef CONFIG_USB_DEBUG |
#define DEBUG |
#else |
#undef DEBUG |
#endif |
#include <linux/usb.h> |
#include <asm/uaccess.h> |
#include <asm/io.h> |
#include <asm/irq.h> |
#include <asm/system.h> |
#include "../core/hcd.h" |
#include "uhci-hcd.h" |
#include <linux/pm.h> |
/* |
* Version Information |
*/ |
#define DRIVER_VERSION "v2.1" |
#define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber" |
#define DRIVER_DESC "USB Universal Host Controller Interface driver" |
/* |
* debug = 0, no debugging messages |
* debug = 1, dump failed URB's except for stalls |
* debug = 2, dump all failed URB's (including stalls) |
* show all queues in /proc/driver/uhci/[pci_addr] |
* debug = 3, show all TD's in URB's when dumping |
*/ |
#ifdef DEBUG |
static int debug = 3; |
#else |
static int debug = 0; |
#endif |
MODULE_PARM(debug, "i"); |
MODULE_PARM_DESC(debug, "Debug level"); |
static char *errbuf; |
#define ERRBUF_LEN (PAGE_SIZE * 8) |
#include "uhci-hub.c" |
#include "uhci-debug.c" |
static kmem_cache_t *uhci_up_cachep; /* urb_priv */ |
static int uhci_get_current_frame_number(struct uhci_hcd *uhci); |
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb); |
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb); |
static void hc_state_transitions(struct uhci_hcd *uhci); |
/* If a transfer is still active after this much time, turn off FSBR */ |
#define IDLE_TIMEOUT (HZ / 20) /* 50 ms */ |
#define FSBR_DELAY (HZ / 20) /* 50 ms */ |
/* When we timeout an idle transfer for FSBR, we'll switch it over to */ |
/* depth first traversal. We'll do it in groups of this number of TD's */ |
/* to make sure it doesn't hog all of the bandwidth */ |
#define DEPTH_INTERVAL 5 |
/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort
 */
/* request an interrupt soon: set IOC on term_td (presumably the
 * always-scheduled terminating TD — see the race note above) */
static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	unsigned long flags;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/* inverse of uhci_set_next_interrupt: stop requesting the IOC
 * interrupt from the terminating TD */
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	unsigned long flags;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/* queue the urb's private state on the complete list, under its own
 * lock; actual completion reporting happens later, off this list */
static inline void uhci_add_complete(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	unsigned long flags;

	spin_lock_irqsave(&uhci->complete_list_lock, flags);
	list_add_tail(&urbp->complete_list, &uhci->complete_list);
	spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
}
static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci, struct usb_device *dev) |
{ |
dma_addr_t dma_handle; |
struct uhci_td *td; |
td = pci_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle); |
if (!td) |
return NULL; |
td->dma_handle = dma_handle; |
td->link = UHCI_PTR_TERM; |
td->buffer = 0; |
td->frame = -1; |
td->dev = dev; |
INIT_LIST_HEAD(&td->list); |
INIT_LIST_HEAD(&td->remove_list); |
INIT_LIST_HEAD(&td->fl_list); |
usb_get_dev(dev); |
return td; |
} |
/* store the three hardware-visible TD words (status, token, buffer
 * pointer) in bus byte order (little-endian) */
static inline void uhci_fill_td(struct uhci_td *td, __u32 status,
		__u32 token, __u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}
/*
 * We insert Isochronous URB's directly into the frame list at the beginning
 *
 * The TD is fully initialized (link set) before the store that makes it
 * reachable from the frame list; mb() enforces that ordering since the
 * HC reads these structures concurrently.
 */
static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
{
	unsigned long flags;

	framenum %= UHCI_NUMFRAMES;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->fl->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		/* append after the last TD already chained on this frame */
		ftd = uhci->fl->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		mb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		/* first TD in this frame: chain the old frame pointer */
		td->link = uhci->fl->frame[framenum];
		mb();
		uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->fl->frame_cpu[framenum] = td;
	}

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/* unlink a TD from the frame list (hardware chain and CPU-side
 * fl_list); safe to call on a TD that was never inserted */
static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	unsigned long flags;

	/* If it's not inserted, don't remove it */
	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	if (td->frame == -1 && list_empty(&td->fl_list))
		goto out;

	if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
		/* td heads its frame's chain */
		if (list_empty(&td->fl_list)) {
			/* it was alone: restore the frame pointer */
			uhci->fl->frame[td->frame] = td->link;
			uhci->fl->frame_cpu[td->frame] = NULL;
		} else {
			/* promote the next TD to head of the frame */
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->fl->frame_cpu[td->frame] = ntd;
		}
	} else {
		/* mid-chain: bypass td from its predecessor */
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	/* ensure the HC-visible bypass is complete before clearing */
	mb();
	td->link = UHCI_PTR_TERM;

	list_del_init(&td->fl_list);
	td->frame = -1;

out:
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
 * Inserts a td into qh list at the top.
 *
 * Links all of the urb's TDs into a chain (each link tagged with
 * @breadth, the depth/breadth-first traversal bit) and points the QH
 * element at the first one; the last TD's link is terminated.
 */
static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, u32 breadth)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td, *ptd;

	if (list_empty(&urbp->td_list))
		return;

	head = &urbp->td_list;
	tmp = head->next;

	/* Ordering isn't important here yet since the QH hasn't been */
	/* inserted into the schedule yet */
	td = list_entry(tmp, struct uhci_td, list);

	/* Add the first TD to the QH element pointer */
	qh->element = cpu_to_le32(td->dma_handle) | breadth;

	ptd = td;

	/* Then link the rest of the TD's */
	tmp = tmp->next;
	while (tmp != head) {
		td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		ptd->link = cpu_to_le32(td->dma_handle) | breadth;

		ptd = td;
	}

	ptd->link = UHCI_PTR_TERM;
}
/* return a TD to the DMA pool, dropping its device reference;
 * the dbg() checks flag TDs freed while still on some list */
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dbg("td %p is still in list!", td);
	if (!list_empty(&td->remove_list))
		dbg("td %p still in remove_list!", td);
	if (!list_empty(&td->fl_list))
		dbg("td %p is still in fl_list!", td);

	if (td->dev)
		usb_put_dev(td->dev);

	pci_pool_free(uhci->td_pool, td, td->dma_handle);
}
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *dev) |
{ |
dma_addr_t dma_handle; |
struct uhci_qh *qh; |
qh = pci_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle); |
if (!qh) |
return NULL; |
qh->dma_handle = dma_handle; |
qh->element = UHCI_PTR_TERM; |
qh->link = UHCI_PTR_TERM; |
qh->dev = dev; |
qh->urbp = NULL; |
INIT_LIST_HEAD(&qh->list); |
INIT_LIST_HEAD(&qh->remove_list); |
usb_get_dev(dev); |
return qh; |
} |
/* return a QH to the DMA pool, dropping its device reference;
 * the dbg() checks flag QHs freed while still linked somewhere */
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (!list_empty(&qh->list))
		dbg("qh %p list not empty!", qh);
	if (!list_empty(&qh->remove_list))
		dbg("qh %p still in remove_list!", qh);

	if (qh->dev)
		usb_put_dev(qh->dev);

	pci_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
/*
 * Append this urb's qh after the last qh in skelqh->list
 * MUST be called with uhci->frame_list_lock acquired
 *
 * Note that urb_priv.queue_list doesn't have a separate queue head;
 * it's a ring with every element "live".
 *
 * Write ordering matters throughout: the HC may be traversing these
 * QHs while we patch them, so the new QH's outgoing links are set
 * (and wmb()-flushed) before any link that makes it reachable.
 */
static void _uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct list_head *tmp;
	struct uhci_qh *lqh;

	/* Grab the last QH */
	lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);

	/*
	 * Patch this endpoint's URB's QHs to point to the next skelqh:
	 *    skelqh --> ... lqh --> newqh --> next skelqh
	 * Do this first, so the HC always sees the right QH after this one.
	 */
	list_for_each (tmp, &urbp->queue_list) {
		struct urb_priv *turbp =
			list_entry(tmp, struct urb_priv, queue_list);

		turbp->qh->link = lqh->link;
	}
	urbp->qh->link = lqh->link;
	wmb();				/* Ordering is important */

	/*
	 * Patch QHs for previous endpoint's queued URBs?  HC goes
	 * here next, not to the next skelqh it now points to.
	 *
	 *    lqh --> td ... --> qh ... --> td --> qh ... --> td
	 *     |                 |                 |
	 *     v                 v                 v
	 *     +<----------------+-----------------+
	 *     v
	 *    newqh --> td ... --> td
	 *     |
	 *     v
	 *    ...
	 *
	 * The HC could see (and use!) any of these as we write them.
	 */
	if (lqh->urbp) {
		list_for_each (tmp, &lqh->urbp->queue_list) {
			struct urb_priv *turbp =
				list_entry(tmp, struct urb_priv, queue_list);

			turbp->qh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
		}
	}
	lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

	list_add_tail(&urbp->qh->list, &skelqh->list);
}
/* locking wrapper around _uhci_insert_qh (which requires
 * frame_list_lock to be held) */
static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
	unsigned long flags;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	_uhci_insert_qh(uhci, skelqh, urb);
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
 * Start removal of QH from schedule; it finishes next frame.
 * TDs should be unlinked before this is called.
 *
 * The QH is bypassed in the hardware chain, parked on qh_remove_list,
 * and an IOC interrupt is requested so it can be freed once the HC is
 * guaranteed to no longer reference it.
 */
static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	unsigned long flags;
	struct uhci_qh *pqh;

	if (!qh)
		return;

	qh->urbp = NULL;

	/*
	 * Only go through the hoops if it's actually linked in
	 * Queued QHs are removed in uhci_delete_queued_urb,
	 * since (for queued URBs) the pqh is pointed to the next
	 * QH in the queue, not the next endpoint's QH.
	 */
	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	if (!list_empty(&qh->list)) {
		pqh = list_entry(qh->list.prev, struct uhci_qh, list);

		/* the previous endpoint's queued QHs also point at us;
		 * repoint them past us first */
		if (pqh->urbp) {
			struct list_head *head, *tmp;

			head = &pqh->urbp->queue_list;
			tmp = head->next;

			while (head != tmp) {
				struct urb_priv *turbp =
					list_entry(tmp, struct urb_priv, queue_list);

				tmp = tmp->next;

				turbp->qh->link = qh->link;
			}
		}

		pqh->link = qh->link;
		mb();
		/* Leave qh->link in case the HC is on the QH now, it will */
		/* continue the rest of the schedule */
		qh->element = UHCI_PTR_TERM;

		list_del_init(&qh->list);
	}
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);

	spin_lock_irqsave(&uhci->qh_remove_list_lock, flags);

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the QH */
	if (list_empty(&uhci->qh_remove_list))
		uhci_set_next_interrupt(uhci);

	list_add(&qh->remove_list, &uhci->qh_remove_list);

	spin_unlock_irqrestore(&uhci->qh_remove_list_lock, flags);
}
static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle) |
{ |
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; |
struct list_head *head, *tmp; |
head = &urbp->td_list; |
tmp = head->next; |
while (head != tmp) { |
struct uhci_td *td = list_entry(tmp, struct uhci_td, list); |
tmp = tmp->next; |
if (toggle) |
td->token |= cpu_to_le32(TD_TOKEN_TOGGLE); |
else |
td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE); |
toggle ^= 1; |
} |
return toggle; |
} |
/* This function will append one URB's QH to another URB's QH. This is for */
/* queuing interrupt, control or bulk transfers */
/*
 * @eurb: an urb already on this endpoint's queue; @urb: the new one.
 * Fixes up the new urb's data toggles from the last queued TD, then
 * links its QH behind the queue's last TD (mb()-ordered so the HC
 * never sees a half-linked QH).
 */
static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
{
	struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
	struct list_head *tmp;
	struct uhci_td *lltd;
	unsigned long flags;

	eurbp = eurb->hcpriv;
	urbp = urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	/* Find the first URB in the queue */
	if (eurbp->queued) {
		struct list_head *head = &eurbp->queue_list;

		tmp = head->next;
		while (tmp != head) {
			struct urb_priv *turbp =
				list_entry(tmp, struct urb_priv, queue_list);

			if (!turbp->queued)
				break;

			tmp = tmp->next;
		}
	} else
		tmp = &eurbp->queue_list;

	furbp = list_entry(tmp, struct urb_priv, queue_list);
	lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);

	/* last TD of the last queued urb; the new urb continues from it */
	lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe),
		uhci_fixup_toggle(urb, uhci_toggle(td_token(lltd)) ^ 1));

	/* All qh's in the queue need to link to the next queue */
	urbp->qh->link = eurbp->qh->link;

	mb();			/* Make sure we flush everything */

	lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

	list_add_tail(&urbp->queue_list, &furbp->queue_list);

	urbp->queued = 1;

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
 * Remove <urb> from its endpoint queue, repairing the data toggles of
 * the URBs that remain behind it and re-linking the hardware QH/TD
 * pointers around the gap.  Takes uhci->frame_list_lock itself.
 */
static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp, *nurbp;
	struct list_head *head, *tmp;
	struct urb_priv *purbp;
	struct uhci_td *pltd;
	unsigned int toggle;
	unsigned long flags;

	urbp = urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	/* Not part of a queue: nothing to repair */
	if (list_empty(&urbp->queue_list))
		goto out;

	/* The URB immediately after us in the queue */
	nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);

	/* Fix up the toggle for the next URB's */
	if (!urbp->queued)
		/* We just set the toggle in uhci_unlink_generic */
		toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
	else {
		/* If we're in the middle of the queue, grab the toggle */
		/* from the TD previous to us */
		purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
				queue_list);
		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
		toggle = uhci_toggle(td_token(pltd)) ^ 1;
	}

	/* Re-run the toggle sequence over every still-queued URB behind us */
	head = &urbp->queue_list;
	tmp = head->next;
	while (head != tmp) {
		struct urb_priv *turbp;
		turbp = list_entry(tmp, struct urb_priv, queue_list);
		tmp = tmp->next;
		if (!turbp->queued)
			break;
		toggle = uhci_fixup_toggle(turbp->urb, toggle);
	}

	/* Remember the final toggle for the next submission */
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
		usb_pipeout(urb->pipe), toggle);

	if (!urbp->queued) {
		/* We are the queue head: promote the next URB to head */
		struct uhci_qh *pqh;

		nurbp->queued = 0;

		/*
		 * Fixup the previous QH's queue to link to the new head
		 * of this queue.
		 */
		pqh = list_entry(urbp->qh->list.prev, struct uhci_qh, list);

		if (pqh->urbp) {
			/* Every queued QH behind the previous QH must now
			 * point at the promoted head */
			struct list_head *head, *tmp;

			head = &pqh->urbp->queue_list;
			tmp = head->next;
			while (head != tmp) {
				struct urb_priv *turbp =
					list_entry(tmp, struct urb_priv, queue_list);
				tmp = tmp->next;
				turbp->qh->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
			}
		}

		pqh->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;

		/* Swap the promoted QH into our slot in the schedule list */
		list_add_tail(&nurbp->qh->list, &urbp->qh->list);
		list_del_init(&urbp->qh->list);
	} else {
		/* We're somewhere in the middle (or end). A bit trickier */
		/* than the head scenario */
		purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
				queue_list);
		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
		if (nurbp->queued)
			pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		else
			/* The next URB happens to be the beginning, so */
			/* we're the last, end the chain */
			pltd->link = UHCI_PTR_TERM;
	}

	list_del_init(&urbp->queue_list);

out:
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
extern void* malloc(int size); |
static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb) |
{ |
struct urb_priv *urbp; |
urbp = malloc(sizeof(struct urb_priv)); //**kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC); |
if (!urbp) { |
err("uhci_alloc_urb_priv: couldn't allocate memory for urb_priv\n"); |
return NULL; |
} |
memset((void *)urbp, 0, sizeof(*urbp)); |
urbp->inserttime = jiffies26; |
urbp->fsbrtime = jiffies26; |
urbp->urb = urb; |
urbp->dev = urb->dev; |
INIT_LIST_HEAD(&urbp->td_list); |
INIT_LIST_HEAD(&urbp->queue_list); |
INIT_LIST_HEAD(&urbp->complete_list); |
INIT_LIST_HEAD(&urbp->urb_list); |
list_add_tail(&urbp->urb_list, &uhci->urb_list); |
urb->hcpriv = urbp; |
return urbp; |
} |
/* |
* MUST be called with urb->lock acquired |
*/ |
static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td) |
{ |
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; |
td->urb = urb; |
list_add_tail(&td->list, &urbp->td_list); |
} |
/* |
* MUST be called with urb->lock acquired |
*/ |
static void uhci_remove_td_from_urb(struct uhci_td *td) |
{ |
if (list_empty(&td->list)) |
return; |
list_del_init(&td->list); |
td->urb = NULL; |
} |
/* |
* MUST be called with urb->lock acquired |
*/ |
/*
 * Tear down the urb_priv attached to <urb>: detach all of its TDs and
 * move them onto uhci->td_remove_list (the interrupt path frees them
 * later), then release the urb_priv itself.
 */
static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *head, *tmp;
	struct urb_priv *urbp;
	unsigned long flags;

	urbp = (struct urb_priv *)urb->hcpriv;
	if (!urbp)
		return;

	/* These lists should be empty by now; anything else points at an
	 * unlink bug upstream — warn but keep going. */
	if (!list_empty(&urbp->urb_list))
		warn("uhci_destroy_urb_priv: urb %p still on uhci->urb_list or uhci->remove_list", urb);

	if (!list_empty(&urbp->complete_list))
		warn("uhci_destroy_urb_priv: urb %p still on uhci->complete_list", urb);

	spin_lock_irqsave(&uhci->td_remove_list_lock, flags);

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the TD's*/
	if (list_empty(&uhci->td_remove_list))
		uhci_set_next_interrupt(uhci);

	/* Detach every TD from the URB and queue it for deferred freeing */
	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;	/* advance before the entry is unlinked */

		uhci_remove_td_from_urb(td);
		uhci_remove_td(uhci, td);
		list_add(&td->remove_list, &uhci->td_remove_list);
	}

	spin_unlock_irqrestore(&uhci->td_remove_list_lock, flags);

	urb->hcpriv = NULL;

	//**kmem_cache_free(uhci_up_cachep, urbp);
	free(urbp);	/* pairs with the malloc() in uhci_alloc_urb_priv() */
}
static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb) |
{ |
unsigned long flags; |
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; |
spin_lock_irqsave(&uhci->frame_list_lock, flags); |
if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) { |
urbp->fsbr = 1; |
if (!uhci->fsbr++ && !uhci->fsbrtimeout) |
uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_hs_control_qh->dma_handle) | UHCI_PTR_QH; |
} |
spin_unlock_irqrestore(&uhci->frame_list_lock, flags); |
} |
static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb) |
{ |
unsigned long flags; |
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; |
spin_lock_irqsave(&uhci->frame_list_lock, flags); |
if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) { |
urbp->fsbr = 0; |
if (!--uhci->fsbr) |
uhci->fsbrtimeout = jiffies26 + FSBR_DELAY; |
} |
spin_unlock_irqrestore(&uhci->frame_list_lock, flags); |
} |
/* |
* Map status to standard result codes |
* |
* <status> is (td->status & 0xFE0000) [a.k.a. uhci_status_bits(td->status)] |
* <dir_out> is True for output TDs and False for input TDs. |
*/ |
static int uhci_map_status(int status, int dir_out) |
{ |
if (!status) |
return 0; |
if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */ |
return -EPROTO; |
if (status & TD_CTRL_CRCTIMEO) { /* CRC/Timeout */ |
if (dir_out) |
return -ETIMEDOUT; |
else |
return -EILSEQ; |
} |
if (status & TD_CTRL_NAK) /* NAK */ |
return -ETIMEDOUT; |
if (status & TD_CTRL_BABBLE) /* Babble */ |
return -EOVERFLOW; |
if (status & TD_CTRL_DBUFERR) /* Buffer error */ |
return -ENOSR; |
if (status & TD_CTRL_STALLED) /* Stalled */ |
return -EPIPE; |
if (status & TD_CTRL_ACTIVE) /* Active */ |
return 0; |
return -EINVAL; |
} |
/* |
* Control transfers |
*/ |
/*
 * Build and schedule a control transfer: one SETUP TD, zero or more
 * DATA TDs, and a final status TD, all hung off a freshly allocated QH.
 *
 * Returns -EINPROGRESS on success, -ENOMEM if a TD/QH allocation fails
 * (TDs already attached to the URB are reclaimed by the caller via
 * uhci_destroy_urb_priv()).  <eurb> is a pending URB for the same
 * endpoint, if any; we queue behind it instead of inserting a new QH.
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;
	struct uhci_qh *qh, *skelqh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors */
	status = TD_CTRL_ACTIVE | uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	uhci_add_td_to_urb(urb, td);
	/* explen(7): a SETUP packet always carries 8 bytes */
	uhci_fill_td(td, status, destination | uhci_explen(7),
		urb->setup_dma);

	/*
	 * If direction is "send", change the frame from SETUP (0x2D)
	 * to OUT (0xE1). Else change it from SETUP to IN (0x69).
	 */
	destination ^= (USB_PID_SETUP ^ usb_packetid(urb->pipe));

	if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TD's
	 */
	while (len > 0) {
		int pktsze = len;

		if (pktsze > maxsze)
			pktsze = maxsze;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
			data);

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;	/* End in Data1 */

	status &= ~TD_CTRL_SPD;

	uhci_add_td_to_urb(urb, td);
	/* IOC: interrupt when the whole control transfer finishes */
	uhci_fill_td(td, status | TD_CTRL_IOC,
		destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	/* Low speed transfers get a different queue, and won't hog the bus */
	if (urb->dev->speed == USB_SPEED_LOW)
		skelqh = uhci->skel_ls_control_qh;
	else {
		skelqh = uhci->skel_hs_control_qh;
		uhci_inc_fsbr(uhci, urb);
	}

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
/* |
* If control was short, then end status packet wasn't sent, so this |
* reorganize s so it's sent to finish the transfer. The original QH is |
* removed from the skel and discarded; all TDs except the last (status) |
* are deleted; the last (status) TD is put on a new QH which is reinserted |
* into the skel. Since the last TD and urb_priv are reused, the TD->link |
* and urb_priv maintain any queued QHs. |
*/ |
static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	/* Mark the URB so uhci_result_control() jumps straight to the
	 * status phase next time around */
	urbp->short_control_packet = 1;

	/* Create a new QH to avoid pointer overwriting problems */
	uhci_remove_qh(uhci, urbp->qh);

	/* Delete all of the TD's except for the status TD at the end */
	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head && tmp->next != head) {	/* stop before the last TD */
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		uhci_remove_td_from_urb(td);
		uhci_remove_td(uhci, td);
		uhci_free_td(uhci, td);
	}

	urbp->qh = uhci_alloc_qh(uhci, urb->dev);
	if (!urbp->qh) {
		err("unable to allocate new QH for control retrigger");
		return -ENOMEM;
	}

	urbp->qh->urbp = urbp;

	/* One TD, who cares about Breadth first? */
	uhci_insert_tds_in_qh(urbp->qh, urb, UHCI_PTR_DEPTH);

	/* Low speed transfers get a different queue */
	if (urb->dev->speed == USB_SPEED_LOW)
		uhci_insert_qh(uhci, uhci->skel_ls_control_qh, urb);
	else
		uhci_insert_qh(uhci, uhci->skel_hs_control_qh, urb);

	return -EINPROGRESS;
}
/*
 * Collect the outcome of a control transfer: check the SETUP TD, sum
 * the DATA TDs into urb->actual_length, then check the status TD.
 *
 * Returns 0 when complete, -EINPROGRESS while TDs are still active,
 * or a mapped error code from uhci_map_status().  A short IN read
 * retriggers just the status phase via usb_control_retrigger_status().
 */
static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status;
	int ret = 0;

	if (list_empty(&urbp->td_list))
		return -EINVAL;

	head = &urbp->td_list;

	if (urbp->short_control_packet) {
		/* Retriggered transfer: only the status TD (the list tail)
		 * is left to inspect */
		tmp = head->prev;
		goto status_phase;
	}

	tmp = head->next;
	td = list_entry(tmp, struct uhci_td, list);

	/* The first TD is the SETUP phase, check the status, but skip */
	/* the count */
	status = uhci_status_bits(td_status(td));
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	urb->actual_length = 0;

	/* The rest of the TD's (but the last) are data */
	tmp = tmp->next;
	while (tmp != head && tmp->next != head) {
		td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		status = uhci_status_bits(td_status(td));
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(td_status(td));

		if (status)
			goto td_error;

		/* Check to see if we received a short packet */
		if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			}

			if (uhci_packetid(td_token(td)) == USB_PID_IN)
				return usb_control_retrigger_status(uhci, urb);
			else
				return 0;
		}
	}

status_phase:
	td = list_entry(tmp, struct uhci_td, list);

	/* Control status phase */
	status = td_status(td);

#ifdef I_HAVE_BUGGY_APC_BACKUPS
	/* APC BackUPS Pro kludge */
	/* It tries to send all of the descriptor instead of the amount */
	/* we requested */
	if (status & TD_CTRL_IOC &&	/* IOC is masked out by uhci_status_bits */
	    status & TD_CTRL_ACTIVE &&
	    status & TD_CTRL_NAK)
		return 0;
#endif

	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (uhci_status_bits(status))
		goto td_error;

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dbg("uhci_result_control() failed with status %x", status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}

	return ret;
}
/* |
* Common submit for bulk and interrupt |
*/ |
/*
 * Shared submit path for bulk and interrupt URBs: chop the transfer
 * buffer into maxpacket-sized TDs, attach them to a new QH, and insert
 * that QH into the <skelqh> schedule (or append behind <eurb>).
 *
 * Returns -EINPROGRESS on success, -EINVAL for a negative length,
 * -ENOMEM if a TD/QH allocation fails (caller cleans up via
 * uhci_destroy_urb_priv()).
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
{
	struct uhci_td *td;
	struct uhci_qh *qh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	dma_addr_t data = urb->transfer_dma;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TD's
	 */
	do {	/* Allow zero length packets */
		int pktsze = len;

		if (pktsze > maxsze)
			pktsze = maxsze;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		data += pktsze;
		/* NOTE(review): len is decremented by maxsze while data
		 * advances by pktsze; these differ only on the final
		 * (short) chunk, where len goes <= 0 and ends the loop */
		len -= maxsze;

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
	    !len && urb->transfer_buffer_length) {
		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	}

	/* Set the flag on the last packet */
	td->status |= cpu_to_le32(TD_CTRL_IOC);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	/* Always breadth first */
	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
/* |
* Common result for bulk and interrupt |
*/ |
/*
 * Shared completion path for bulk and interrupt URBs: walk the TD
 * chain, accumulate urb->actual_length, and translate the first error
 * into a standard code.  Returns 0 when done, -EINPROGRESS while any
 * TD is still active; a short packet terminates the transfer early
 * (error only when URB_SHORT_NOT_OK is set).
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status = 0;
	int ret = 0;

	urb->actual_length = 0;

	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		status = uhci_status_bits(td_status(td));
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(td_status(td));

		if (status)
			goto td_error;

		/* Short packet: done early; whether that is an error is
		 * the submitter's call (URB_SHORT_NOT_OK) */
		if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			} else
				return 0;
		}
	}

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));
	if (ret == -EPIPE)
		/* endpoint has stalled - mark it halted */
		usb_endpoint_halt(urb->dev, uhci_endpoint(td_token(td)),
	    			uhci_packetout(td_token(td)));

err:
	/*
	 * Enable this chunk of code if you want to see some more debugging.
	 * But be careful, it has the tendancy to starve out khubd and prevent
	 * disconnects from happening successfully if you have a slow debug
	 * log interface (like a serial console.
	 */
#if 0
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dbg("uhci_result_common() failed with status %x", status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}
#endif
	return ret;
}
static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb) |
{ |
int ret; |
/* Can't have low speed bulk transfers */ |
if (urb->dev->speed == USB_SPEED_LOW) |
return -EINVAL; |
ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh); |
if (ret == -EINPROGRESS) |
uhci_inc_fsbr(uhci, urb); |
return ret; |
} |
static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb) |
{ |
/* USB 1.1 interrupt transfers only involve one packet per interval; |
* that's the uhci_submit_common() "breadth first" policy. Drivers |
* can submit urbs of any length, but longer ones might need many |
* intervals to complete. |
*/ |
return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]); |
} |
/* |
* Bulk and interrupt use common result |
*/ |
#define uhci_result_bulk uhci_result_common |
#define uhci_result_interrupt uhci_result_common |
/* |
* Isochronous transfers |
*/ |
/*
 * Scan uhci->urb_list for in-flight isochronous URBs on the same pipe
 * and device.  On success (*start, *end) bracket the frames they
 * occupy: *start is the first pending URB's start frame, *end is one
 * past the last pending URB's final frame (mod UHCI_NUMFRAMES).
 * Returns 0 if a previous URB was found, -1 otherwise.
 */
static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
{
	struct urb *last_urb = NULL;
	struct list_head *tmp, *head;
	int ret = 0;

	head = &uhci->urb_list;
	tmp = head->next;
	while (tmp != head) {
		struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
		struct urb *u = up->urb;

		tmp = tmp->next;

		/* look for pending URB's with identical pipe handle */
		if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
		    (u->status == -EINPROGRESS) && (u != urb)) {
			if (!last_urb)
				*start = u->start_frame;
			last_urb = u;
		}
	}

	if (last_urb) {
		*end = (last_urb->start_frame + last_urb->number_of_packets *
				last_urb->interval) & (UHCI_NUMFRAMES-1);
		ret = 0;
	} else
		ret = -1;	/* no previous urb found */

	return ret;
}
static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb) |
{ |
int limits; |
unsigned int start = 0, end = 0; |
if (urb->number_of_packets > 900) /* 900? Why? */ |
return -EFBIG; |
limits = isochronous_find_limits(uhci, urb, &start, &end); |
if (urb->transfer_flags & URB_ISO_ASAP) { |
if (limits) { |
int curframe; |
curframe = uhci_get_current_frame_number(uhci) % UHCI_NUMFRAMES; |
urb->start_frame = (curframe + 10) % UHCI_NUMFRAMES; |
} else |
urb->start_frame = end; |
} else { |
urb->start_frame %= UHCI_NUMFRAMES; |
/* FIXME: Sanity check */ |
} |
return 0; |
} |
/* |
* Isochronous transfers |
*/ |
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	int i, ret, frame;
	int status, destination;

	/* IOS: isochronous TDs are not retried by the controller */
	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	ret = isochronous_find_start(uhci, urb);
	if (ret)
		return ret;

	/* One TD per frame descriptor, spaced urb->interval frames apart.
	 * Zero-length frames get no TD at all. */
	frame = urb->start_frame;
	for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
		if (!urb->iso_frame_desc[i].length)
			continue;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
			urb->transfer_dma + urb->iso_frame_desc[i].offset);

		/* Interrupt only after the final packet */
		if (i + 1 >= urb->number_of_packets)
			td->status |= cpu_to_le32(TD_CTRL_IOC);

		uhci_insert_td_frame_list(uhci, td, frame);
	}

	return -EINPROGRESS;
}
/*
 * Gather per-frame results for an isochronous URB.  Returns
 * -EINPROGRESS while any TD is active, otherwise the last frame error
 * (or 0); per-frame status/length land in urb->iso_frame_desc[].
 *
 * NOTE(review): uhci_submit_isochronous() skips zero-length frames
 * without allocating a TD, while <i> here advances once per TD — for
 * URBs containing zero-length frame descriptors the index and TD may
 * get out of step.  Verify against callers before relying on it.
 */
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int status;
	int i, ret = 0;

	urb->actual_length = 0;

	i = 0;
	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
		int actlength;

		tmp = tmp->next;

		if (td_status(td) & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		actlength = uhci_actual_length(td_status(td));
		urb->iso_frame_desc[i].actual_length = actlength;
		urb->actual_length += actlength;

		status = uhci_map_status(uhci_status_bits(td_status(td)), usb_pipeout(urb->pipe));
		urb->iso_frame_desc[i].status = status;
		if (status) {
			/* Frame errors don't abort the URB; count them and
			 * report the last one as the overall result */
			urb->error_count++;
			ret = status;
		}

		i++;
	}

	return ret;
}
/* |
* MUST be called with uhci->urb_list_lock acquired |
*/ |
static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb) |
{ |
struct list_head *tmp, *head; |
/* We don't match Isoc transfers since they are special */ |
if (usb_pipeisoc(urb->pipe)) |
return NULL; |
head = &uhci->urb_list; |
tmp = head->next; |
while (tmp != head) { |
struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list); |
struct urb *u = up->urb; |
tmp = tmp->next; |
if (u->dev == urb->dev && u->status == -EINPROGRESS) { |
/* For control, ignore the direction */ |
if (usb_pipecontrol(urb->pipe) && |
(u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN)) |
return u; |
else if (u->pipe == urb->pipe) |
return u; |
} |
} |
return NULL; |
} |
/*
 * hc_driver entry point: allocate per-URB state and dispatch to the
 * per-pipe-type submit routine.  Interrupt and isochronous URBs also
 * reserve bus bandwidth.  Returns 0 on success (the URB is now in
 * flight) or a negative error, after which all per-URB state has been
 * torn down again.
 */
static int uhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, int mem_flags)
{
	int ret = -EINVAL;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb *eurb;
	int bustime;

	spin_lock_irqsave(&uhci->urb_list_lock, flags);

	/* Is another URB already pending on this endpoint?  If so we
	 * queue behind it instead of inserting a fresh QH. */
	eurb = uhci_find_urb_ep(uhci, urb);

	if (!uhci_alloc_urb_priv(uhci, urb)) {
		spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
		return -ENOMEM;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_submit_control(uhci, urb, eurb);
		break;
	case PIPE_INTERRUPT:
		if (!eurb) {
			/* First URB on this endpoint: must reserve bandwidth */
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, eurb);
				if (ret == -EINPROGRESS)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			urb->bandwidth = eurb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, eurb);
		}
		break;
	case PIPE_BULK:
		ret = uhci_submit_bulk(uhci, urb, eurb);
		break;
	case PIPE_ISOCHRONOUS:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb);
		if (ret == -EINPROGRESS)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}

	if (ret != -EINPROGRESS) {
		/* Submit failed, so delete it from the urb_list */
		struct urb_priv *urbp = urb->hcpriv;

		list_del_init(&urbp->urb_list);
		spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
		uhci_destroy_urb_priv (uhci, urb);

		return ret;
	}

	spin_unlock_irqrestore(&uhci->urb_list_lock, flags);

	return 0;
}
/* |
* Return the result of a transfer |
* |
* MUST be called with urb_list_lock acquired |
*/ |
static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
{
	int ret = -EINVAL;
	unsigned long flags;
	struct urb_priv *urbp;

	spin_lock_irqsave(&urb->lock, flags);

	urbp = (struct urb_priv *)urb->hcpriv;

	if (urb->status != -EINPROGRESS) {
		info("uhci_transfer_result: called for URB %p not in flight?", urb);
		goto out;
	}

	/* Let the per-pipe-type result routine decide whether the URB
	 * is finished */
	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_result_control(uhci, urb);
		break;
	case PIPE_INTERRUPT:
		ret = uhci_result_interrupt(uhci, urb);
		break;
	case PIPE_BULK:
		ret = uhci_result_bulk(uhci, urb);
		break;
	case PIPE_ISOCHRONOUS:
		ret = uhci_result_isochronous(uhci, urb);
		break;
	}

	urbp->status = ret;

	/* Still running: leave it on urb_list and try again later */
	if (ret == -EINPROGRESS)
		goto out;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	case PIPE_ISOCHRONOUS:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Spinlock needed ? */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		uhci_unlink_generic(uhci, urb);
		break;
	case PIPE_INTERRUPT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		spin_lock(&uhci->frame_list_lock);
		/* Spinlock needed ? */
		if (list_empty(&urbp->queue_list) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		spin_unlock(&uhci->frame_list_lock);
		uhci_unlink_generic(uhci, urb);
		break;
	default:
		info("uhci_transfer_result: unknown pipe type %d for urb %p\n",
			usb_pipetype(urb->pipe), urb);
	}

	/* Remove it from uhci->urb_list */
	list_del_init(&urbp->urb_list);

	/* Hand off to the completion list for giveback outside this lock */
	uhci_add_complete(uhci, urb);

out:
	spin_unlock_irqrestore(&urb->lock, flags);
}
/* |
* MUST be called with urb->lock acquired |
*/ |
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *head, *tmp;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int prevactive = 1;

	/* We can get called when urbp allocation fails, so check */
	if (!urbp)
		return;

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */

	/*
	 * Now we need to find out what the last successful toggle was
	 * so we can update the local data toggle for the next transfer
	 *
	 * There's 3 way's the last successful completed TD is found:
	 *
	 * 1) The TD is NOT active and the actual length < expected length
	 * 2) The TD is NOT active and it's the last TD in the chain
	 * 3) The TD is active and the previous TD is NOT active
	 *
	 * Control and Isochronous ignore the toggle, so this is safe
	 * for all types
	 */
	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;	/* after this, tmp == head means td was last */

		if (!(td_status(td) & TD_CTRL_ACTIVE) &&
		    (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td)) ||
		    tmp == head))
			/* Cases 1 and 2: next transfer starts one past this TD's toggle */
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)) ^ 1);
		else if ((td_status(td) & TD_CTRL_ACTIVE) && !prevactive)
			/* Case 3: this TD never ran, so its own toggle is next */
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)));

		prevactive = td_status(td) & TD_CTRL_ACTIVE;
	}

	uhci_delete_queued_urb(uhci, urb);

	/* The interrupt loop will reclaim the QH's */
	uhci_remove_qh(uhci, urbp->qh);
	urbp->qh = NULL;
}
/*
 * hc_driver unlink entry point: take the URB off the schedule and park
 * it on uhci->urb_remove_list; the next interrupt completes it with
 * -ECONNRESET.  Always returns 0.
 */
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp = urb->hcpriv;

	/* If this is an interrupt URB that is being killed in urb->complete, */
	/* then just set its status and return */
	if (!urbp) {
		urb->status = -ECONNRESET;
		return 0;
	}

	spin_lock_irqsave(&uhci->urb_list_lock, flags);

	list_del_init(&urbp->urb_list);

	uhci_unlink_generic(uhci, urb);

	spin_lock(&uhci->urb_remove_list_lock);

	/* If we're the first, set the next interrupt bit */
	if (list_empty(&uhci->urb_remove_list))
		uhci_set_next_interrupt(uhci);

	/* urbp->urb_list is reused here to ride the remove list */
	list_add(&urbp->urb_list, &uhci->urb_remove_list);

	spin_unlock(&uhci->urb_remove_list_lock);
	spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
	return 0;
}
/*
 * An FSBR URB has been stuck too long (see stall_callback): drop its
 * FSBR claim and demote most of its TD links from breadth-first to
 * depth-first so it stops monopolizing the bus.  Always returns 0.
 */
static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct list_head *head, *tmp;
	int count = 0;

	uhci_dec_fsbr(uhci, urb);

	urbp->fsbr_timeout = 1;

	/*
	 * Ideally we would want to fix qh->element as well, but it's
	 * read/write by the HC, so that can introduce a race. It's not
	 * really worth the hassle
	 */

	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		/*
		 * Make sure we don't do the last one (since it'll have the
		 * TERM bit set) as well as we skip every so many TD's to
		 * make sure it doesn't hog the bandwidth
		 */
		/* NOTE(review): no cpu_to_le32() around UHCI_PTR_DEPTH here;
		 * presumably fine since UHCI hardware is little-endian PCI —
		 * verify on big-endian hosts */
		if (tmp != head && (count % DEPTH_INTERVAL) == (DEPTH_INTERVAL - 1))
			td->link |= UHCI_PTR_DEPTH;

		count++;
	}

	return 0;
}
/* |
* uhci_get_current_frame_number() |
* |
* returns the current frame number for a USB bus/controller. |
*/ |
static int uhci_get_current_frame_number(struct uhci_hcd *uhci) |
{ |
return inw(uhci->io_addr + USBFRNUM); |
} |
static int init_stall_timer(struct usb_hcd *hcd); |
/*
 * Periodic (HZ/10) watchdog: demote stuck FSBR URBs, dequeue URBs
 * whose urb->timeout has expired, tear down the FSBR loop once its
 * grace period ends, and drive controller state transitions.  Re-arms
 * itself via init_stall_timer().
 */
static void stall_callback(unsigned long ptr)
{
	struct usb_hcd *hcd = (struct usb_hcd *)ptr;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	struct list_head list, *tmp, *head;
	unsigned long flags;

	INIT_LIST_HEAD(&list);

	spin_lock_irqsave(&uhci->urb_list_lock, flags);
	head = &uhci->urb_list;
	tmp = head->next;
	while (tmp != head) {
		struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
		struct urb *u = up->urb;

		tmp = tmp->next;

		spin_lock(&u->lock);

		/* Check if the FSBR timed out */
		if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies26, up->fsbrtime + IDLE_TIMEOUT))
			uhci_fsbr_timeout(uhci, u);

		/* Check if the URB timed out */
		if (u->timeout && time_after_eq(jiffies26, up->inserttime + u->timeout))
			/* collect on a private list; dequeue happens after
			 * urb_list_lock is dropped (uhci_urb_dequeue takes it) */
			list_move_tail(&up->urb_list, &list);

		spin_unlock(&u->lock);
	}
	spin_unlock_irqrestore(&uhci->urb_list_lock, flags);

	/* Now dequeue the timed-out URBs, lock-free on the private list */
	head = &list;
	tmp = head->next;
	while (tmp != head) {
		struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
		struct urb *u = up->urb;

		tmp = tmp->next;

		uhci_urb_dequeue(hcd, u);
	}

	/* Really disable FSBR */
	if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies26, uhci->fsbrtimeout)) {
		uhci->fsbrtimeout = 0;
		uhci->skel_term_qh->link = UHCI_PTR_TERM;
	}

	/* Poll for and perform state transitions */
	hc_state_transitions(uhci);

	init_stall_timer(hcd);
}
static int init_stall_timer(struct usb_hcd *hcd) |
{ |
struct uhci_hcd *uhci = hcd_to_uhci(hcd); |
init_timer(&uhci->stall_timer); |
uhci->stall_timer.function = stall_callback; |
uhci->stall_timer.data = (unsigned long)hcd; |
uhci->stall_timer.expires = jiffies26 + (HZ / 10); |
add_timer(&uhci->stall_timer); |
return 0; |
} |
static void uhci_free_pending_qhs(struct uhci_hcd *uhci) |
{ |
struct list_head *tmp, *head; |
unsigned long flags; |
spin_lock_irqsave(&uhci->qh_remove_list_lock, flags); |
head = &uhci->qh_remove_list; |
tmp = head->next; |
while (tmp != head) { |
struct uhci_qh *qh = list_entry(tmp, struct uhci_qh, remove_list); |
tmp = tmp->next; |
list_del_init(&qh->remove_list); |
uhci_free_qh(uhci, qh); |
} |
spin_unlock_irqrestore(&uhci->qh_remove_list_lock, flags); |
} |
static void uhci_free_pending_tds(struct uhci_hcd *uhci) |
{ |
struct list_head *tmp, *head; |
unsigned long flags; |
spin_lock_irqsave(&uhci->td_remove_list_lock, flags); |
head = &uhci->td_remove_list; |
tmp = head->next; |
while (tmp != head) { |
struct uhci_td *td = list_entry(tmp, struct uhci_td, remove_list); |
tmp = tmp->next; |
list_del_init(&td->remove_list); |
uhci_free_td(uhci, td); |
} |
spin_unlock_irqrestore(&uhci->td_remove_list_lock, flags); |
} |
/*
 * Final completion step for one URB: free its private state, latch the
 * result code (unless an unlink already stamped -ENOENT/-ECONNRESET),
 * and give the URB back to the USB core.
 */
static void uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int status;
	unsigned long flags;

	spin_lock_irqsave(&urb->lock, flags);

	/* Read the result before urbp is freed below */
	status = urbp->status;
	uhci_destroy_urb_priv(uhci, urb);

	if (urb->status != -ENOENT && urb->status != -ECONNRESET)
		urb->status = status;

	spin_unlock_irqrestore(&urb->lock, flags);

	usb_hcd_giveback_urb(hcd, urb, regs);
}
/*
 * Hand every URB on the complete list back to its driver.
 *
 * The list lock cannot be held across uhci_finish_urb() (the
 * completion callback may re-enter the driver, e.g. to resubmit), so
 * the lock is dropped around each callback and the walk restarted
 * from the head afterwards.
 */
static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	struct list_head *tmp, *head;
	unsigned long flags;

	spin_lock_irqsave(&uhci->complete_list_lock, flags);
	head = &uhci->complete_list;
	tmp = head->next;
	while (tmp != head) {
		struct urb_priv *urbp = list_entry(tmp, struct urb_priv, complete_list);
		struct urb *urb = urbp->urb;

		/* Unlink before dropping the lock so nobody else sees it */
		list_del_init(&urbp->complete_list);
		spin_unlock_irqrestore(&uhci->complete_list_lock, flags);

		uhci_finish_urb(hcd, urb, regs);

		/* The list may have changed while unlocked: start over */
		spin_lock_irqsave(&uhci->complete_list_lock, flags);
		head = &uhci->complete_list;
		tmp = head->next;
	}
	spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
}
static void uhci_remove_pending_qhs(struct uhci_hcd *uhci) |
{ |
struct list_head *tmp, *head; |
unsigned long flags; |
spin_lock_irqsave(&uhci->urb_remove_list_lock, flags); |
head = &uhci->urb_remove_list; |
tmp = head->next; |
while (tmp != head) { |
struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list); |
struct urb *urb = urbp->urb; |
tmp = tmp->next; |
list_del_init(&urbp->urb_list); |
urbp->status = urb->status = -ECONNRESET; |
uhci_add_complete(uhci, urb); |
} |
spin_unlock_irqrestore(&uhci->urb_remove_list_lock, flags); |
} |
static void uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs) |
{ |
struct uhci_hcd *uhci = hcd_to_uhci(hcd); |
unsigned int io_addr = uhci->io_addr; |
unsigned short status; |
struct list_head *tmp, *head; |
static int count =0; |
/* |
* Read the interrupt status, and write it back to clear the |
* interrupt cause |
*/ |
status = inw(io_addr + USBSTS); |
if (!status) /* shared interrupt, not mine */ |
return; |
outw(status, io_addr + USBSTS); /* Clear it */ |
// printk("%x uhci_irq\n", io_addr); |
if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) { |
if (status & USBSTS_HSE) |
{ |
err("%x: host system error, PCI problems?", io_addr); |
} |
if (status & USBSTS_HCPE) |
err("%x: host controller process error. something bad happened", io_addr); |
if ((status & USBSTS_HCH) && uhci->state > 0) { |
err("%x: host controller halted. very bad", io_addr); |
/* FIXME: Reset the controller, fix the offending TD */ |
} |
} |
if (status & USBSTS_RD) |
uhci->resume_detect = 1; |
uhci_free_pending_qhs(uhci); |
uhci_free_pending_tds(uhci); |
uhci_remove_pending_qhs(uhci); |
uhci_clear_next_interrupt(uhci); |
/* Walk the list of pending URB's to see which ones completed */ |
spin_lock(&uhci->urb_list_lock); |
head = &uhci->urb_list; |
tmp = head->next; |
while (tmp != head) { |
struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list); |
struct urb *urb = urbp->urb; |
tmp = tmp->next; |
/* Checks the status and does all of the magic necessary */ |
uhci_transfer_result(uhci, urb); |
} |
spin_unlock(&uhci->urb_list_lock); |
uhci_finish_completion(hcd, regs); |
} |
/*
 * Global-reset the controller: assert GRESET for ~50ms, release it,
 * then wait another ~10ms.  Sleeps via schedule_timeout(), so this
 * must be called from process context.
 */
static void reset_hc(struct uhci_hcd *uhci)
{
	unsigned int io_addr = uhci->io_addr;

	/* Global reset for 50ms */
	uhci->state = UHCI_RESET;
	outw(USBCMD_GRESET, io_addr + USBCMD);
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout((HZ*50+999) / 1000);	/* round 50ms up to jiffies */
	outw(0, io_addr + USBCMD);

	/* Another 10ms delay */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout((HZ*10+999) / 1000);
	uhci->resume_detect = 0;	/* any pending resume is now moot */
}
/*
 * Put the controller into global suspend (EGSM).  State is updated
 * before the register write so observers never see a running state
 * with the HC already suspended.
 */
static void suspend_hc(struct uhci_hcd *uhci)
{
	unsigned int io_addr = uhci->io_addr;

	dbg("%x: suspend_hc", io_addr);

	uhci->state = UHCI_SUSPENDED;
	uhci->resume_detect = 0;
	outw(USBCMD_EGSM, io_addr + USBCMD);	/* Enter Global Suspend Mode */
}
/*
 * Advance the resume state machine one step.  Driven repeatedly from
 * hc_state_transitions(): SUSPENDED starts global resume signalling,
 * RESUMING_1 ends it (and falls through), RESUMING_2 waits for the
 * controller to finish, then the HC is set running again.
 */
static void wakeup_hc(struct uhci_hcd *uhci)
{
	unsigned int io_addr = uhci->io_addr;

	switch (uhci->state) {
	case UHCI_SUSPENDED:		/* Start the resume */
		dbg("%x: wakeup_hc", io_addr);

		/* Global resume for >= 20ms */
		outw(USBCMD_FGR | USBCMD_EGSM, io_addr + USBCMD);
		uhci->state = UHCI_RESUMING_1;
		uhci->state_end = jiffies26 + (20*HZ+999) / 1000;	/* >= 20ms, rounded up */
		break;

	case UHCI_RESUMING_1:		/* End global resume */
		uhci->state = UHCI_RESUMING_2;
		outw(0, io_addr + USBCMD);
		/* Falls through */

	case UHCI_RESUMING_2:		/* Wait for EOP to be sent */
		if (inw(io_addr + USBCMD) & USBCMD_FGR)
			break;	/* HC still signalling resume; try again later */

		/* Run for at least 1 second, and
		 * mark it configured with a 64-byte max packet */
		uhci->state = UHCI_RUNNING_GRACE;
		uhci->state_end = jiffies26 + HZ;
		outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP,
			io_addr + USBCMD);
		break;

	case UHCI_RUNNING_GRACE:	/* Now allowed to suspend */
		uhci->state = UHCI_RUNNING;
		break;

	default:
		break;
	}
}
static int ports_active(struct uhci_hcd *uhci) |
{ |
unsigned int io_addr = uhci->io_addr; |
int connection = 0; |
int i; |
for (i = 0; i < uhci->rh_numports; i++) |
connection |= (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_CCS); |
return connection; |
} |
static int suspend_allowed(struct uhci_hcd *uhci) |
{ |
unsigned int io_addr = uhci->io_addr; |
int i; |
if (!uhci->hcd.pdev || uhci->hcd.pdev->vendor != PCI_VENDOR_ID_INTEL) |
return 1; |
/* Some of Intel's USB controllers have a bug that causes false |
* resume indications if any port has an over current condition. |
* To prevent problems, we will not allow a global suspend if |
* any ports are OC. |
* |
* Some motherboards using Intel's chipsets (but not using all |
* the USB ports) appear to hardwire the over current inputs active |
* to disable the USB ports. |
*/ |
/* check for over current condition on any port */ |
for (i = 0; i < uhci->rh_numports; i++) { |
if (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_OC) |
return 0; |
} |
return 1; |
} |
static void hc_state_transitions(struct uhci_hcd *uhci) |
{ |
switch (uhci->state) { |
case UHCI_RUNNING: |
/* global suspend if nothing connected for 1 second */ |
if (!ports_active(uhci) && suspend_allowed(uhci)) { |
uhci->state = UHCI_SUSPENDING_GRACE; |
uhci->state_end = jiffies26 + HZ; |
} |
break; |
case UHCI_SUSPENDING_GRACE: |
if (ports_active(uhci)) |
uhci->state = UHCI_RUNNING; |
else if (time_after_eq(jiffies26, uhci->state_end)) |
suspend_hc(uhci); |
break; |
case UHCI_SUSPENDED: |
/* wakeup if requested by a device */ |
if (uhci->resume_detect) |
wakeup_hc(uhci); |
break; |
case UHCI_RESUMING_1: |
case UHCI_RESUMING_2: |
case UHCI_RUNNING_GRACE: |
if (time_after_eq(jiffies26, uhci->state_end)) |
wakeup_hc(uhci); |
break; |
default: |
break; |
} |
} |
/*
 * Reset the controller and start it running: enable all interrupt
 * sources, point the HC at frame 0 of the DMA frame list, and enter
 * the one-second RUNNING_GRACE window before auto-suspend is allowed.
 */
static void start_hc(struct uhci_hcd *uhci)
{
	unsigned int io_addr = uhci->io_addr;
	int timeout = 1000;

	/*
	 * Reset the HC - this will force us to get a
	 * new notification of any already connected
	 * ports due to the virtual disconnect that it
	 * implies.
	 */
	outw(USBCMD_HCRESET, io_addr + USBCMD);
	while (inw(io_addr + USBCMD) & USBCMD_HCRESET) {
		if (!--timeout) {
			printk(KERN_ERR "uhci: USBCMD_HCRESET timed out!\n");
			break;	/* carry on anyway; HC may be wedged */
		}
	}

	/* Turn on all interrupts */
	outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
		io_addr + USBINTR);

	/* Start at frame 0 */
	outw(0, io_addr + USBFRNUM);
	outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);	/* frame list base */

	/* Run and mark it configured with a 64-byte max packet */
	uhci->state = UHCI_RUNNING_GRACE;
	uhci->state_end = jiffies26 + HZ;
	outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);

	uhci->hcd.state = USB_STATE_RUNNING;

#ifdef DEB
	/* Debug-only: dump controller status and the first 20 frame entries */
	{
		__u32 *tdp;
		int i;
		int status = inw(io_addr + USBSTS);
		printk(KERN_INFO "[%x] Frame = %d Status =%x fl=%x\n", io_addr, inw(io_addr + USBFRNUM), status, uhci->fl->dma_handle);
		for (i=0; i<20; i++)
		{
			int status = inw(io_addr + USBSTS);
			wait_ms26(500);
			tdp=(__u32*)uhci->fl->frame[i];
			printk(KERN_INFO "[%x] Frame[%d] -> @%x = %x status=%x fl=%x\n", io_addr, i, uhci->fl->frame[i], *tdp, status, uhci->fl->dma_handle );
		}
	}
#endif
}
/* |
* De-allocate all resources.. |
*/ |
static void release_uhci(struct uhci_hcd *uhci) |
{ |
int i; |
for (i = 0; i < UHCI_NUM_SKELQH; i++) |
if (uhci->skelqh[i]) { |
uhci_free_qh(uhci, uhci->skelqh[i]); |
uhci->skelqh[i] = NULL; |
} |
if (uhci->term_td) { |
uhci_free_td(uhci, uhci->term_td); |
uhci->term_td = NULL; |
} |
if (uhci->qh_pool) { |
pci_pool_destroy(uhci->qh_pool); |
uhci->qh_pool = NULL; |
} |
if (uhci->td_pool) { |
pci_pool_destroy(uhci->td_pool); |
uhci->td_pool = NULL; |
} |
if (uhci->fl) { |
pci_free_consistent(uhci->hcd.pdev, sizeof(*uhci->fl), uhci->fl, uhci->fl->dma_handle); |
uhci->fl = NULL; |
} |
#ifdef CONFIG_PROC_FS |
if (uhci->proc_entry) { |
remove_proc_entry(uhci->hcd.self.bus_name, uhci_proc_root); |
uhci->proc_entry = NULL; |
} |
#endif |
} |
/*
 * First-stage bring-up, called by the generic HCD glue before start:
 * record the I/O base, reset the controller so no interrupts from a
 * previous setup can arrive, and take legacy (BIOS keyboard/mouse)
 * emulation away via the USBLEGSUP config register.
 */
static int uhci_reset(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	uhci->io_addr = (unsigned long) hcd->regs;

	/* Maybe kick BIOS off this hardware. Then reset, so we won't get
	 * interrupts from any previous setup.
	 */
	reset_hc(uhci);
	pci_write_config_word(hcd->pdev, USBLEGSUP, USBLEGSUP_DEFAULT);
	return 0;
}
/* |
* Allocate a frame list, and then setup the skeleton |
* |
* The hardware doesn't really know any difference |
* in the queues, but the order does matter for the |
* protocols higher up. The order is: |
* |
* - any isochronous events handled before any |
* of the queues. We don't do that here, because |
* we'll create the actual TD entries on demand. |
* - The first queue is the interrupt queue. |
* - The second queue is the control queue, split into low and high speed |
* - The third queue is bulk queue. |
* - The fourth queue is the bandwidth reclamation queue, which loops back |
* to the high speed control queue. |
*/ |
/*
 * Bring the controller fully up: allocate the frame list and the
 * TD/QH pools, detect the number of root-hub ports, build the
 * skeleton QH chain, fill the frame list, start the HC and register
 * the root hub.  Errors unwind through the goto chain in reverse
 * allocation order.
 */
static int uhci_start(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int retval = -EBUSY;
	int i, port;
	unsigned io_size;
	dma_addr_t dma_handle;
	struct usb_device *udev;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *ent;
#endif

	io_size = pci_resource_len(hcd->pdev, hcd->region);

#ifdef CONFIG_PROC_FS
	/* Per-controller debug file under /proc/driver/uhci/ */
	ent = create_proc_entry(hcd->self.bus_name, S_IFREG|S_IRUGO|S_IWUSR, uhci_proc_root);
	if (!ent) {
		err("couldn't create uhci proc entry");
		retval = -ENOMEM;
		goto err_create_proc_entry;
	}

	ent->data = uhci;
	ent->proc_fops = &uhci_proc_operations;
	ent->size = 0;
	uhci->proc_entry = ent;
#endif

	uhci->fsbr = 0;
	uhci->fsbrtimeout = 0;

	/* Locks and the lists they protect */
	spin_lock_init(&uhci->qh_remove_list_lock);
	INIT_LIST_HEAD(&uhci->qh_remove_list);

	spin_lock_init(&uhci->td_remove_list_lock);
	INIT_LIST_HEAD(&uhci->td_remove_list);

	spin_lock_init(&uhci->urb_remove_list_lock);
	INIT_LIST_HEAD(&uhci->urb_remove_list);

	spin_lock_init(&uhci->urb_list_lock);
	INIT_LIST_HEAD(&uhci->urb_list);

	spin_lock_init(&uhci->complete_list_lock);
	INIT_LIST_HEAD(&uhci->complete_list);

	spin_lock_init(&uhci->frame_list_lock);

	/* The frame list must live in DMA-consistent memory: the HC reads it */
	uhci->fl = pci_alloc_consistent(hcd->pdev, sizeof(*uhci->fl), &dma_handle);
	if (!uhci->fl) {
		err("unable to allocate consistent memory for frame list");
		goto err_alloc_fl;
	}

	memset((void *)uhci->fl, 0, sizeof(*uhci->fl));

	uhci->fl->dma_handle = dma_handle;

	uhci->td_pool = pci_pool_create("uhci_td", hcd->pdev,
		sizeof(struct uhci_td), 16, 0);
	if (!uhci->td_pool) {
		err("unable to create td pci_pool");
		goto err_create_td_pool;
	}

	uhci->qh_pool = pci_pool_create("uhci_qh", hcd->pdev,
		sizeof(struct uhci_qh), 16, 0);
	if (!uhci->qh_pool) {
		err("unable to create qh pci_pool");
		goto err_create_qh_pool;
	}

	/* Initialize the root hub */

	/* UHCI specs says devices must have 2 ports, but goes on to say */
	/*  they may have more but give no way to determine how many they */
	/*  have. However, according to the UHCI spec, Bit 7 is always set */
	/*  to 1. So we try to use this to our advantage */
	for (port = 0; port < (io_size - 0x10) / 2; port++) {
		unsigned int portstatus;

		portstatus = inw(uhci->io_addr + 0x10 + (port * 2));
		if (!(portstatus & 0x0080))
			break;	/* bit 7 clear: not a port status register */
	}
	if (debug)
		info("detected %d ports", port);

	/* This is experimental so anything less than 2 or greater than 8 is */
	/*  something weird and we'll ignore it */
	if (port < 2 || port > 8) {
		info("port count misdetected? forcing to 2 ports");
		port = 2;
	}

	uhci->rh_numports = port;

	hcd->self.root_hub = udev = usb_alloc_dev(NULL, &hcd->self);
	if (!udev) {
		err("unable to allocate root hub");
		goto err_alloc_root_hub;
	}

	uhci->term_td = uhci_alloc_td(uhci, udev);
	if (!uhci->term_td) {
		err("unable to allocate terminating TD");
		goto err_alloc_term_td;
	}

	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
		uhci->skelqh[i] = uhci_alloc_qh(uhci, udev);
		if (!uhci->skelqh[i]) {
			err("unable to allocate QH %d", i);
			goto err_alloc_skelqh;
		}
	}

	/*
	 * 8 Interrupt queues; link int2 to int1, int4 to int2, etc
	 * then link int1 to control and control to bulk
	 */
	uhci->skel_int128_qh->link = cpu_to_le32(uhci->skel_int64_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int64_qh->link = cpu_to_le32(uhci->skel_int32_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int32_qh->link = cpu_to_le32(uhci->skel_int16_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int16_qh->link = cpu_to_le32(uhci->skel_int8_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int8_qh->link = cpu_to_le32(uhci->skel_int4_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int4_qh->link = cpu_to_le32(uhci->skel_int2_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int2_qh->link = cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;

	uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_hs_control_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_hs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;

	/* This dummy TD is to work around a bug in Intel PIIX controllers */
	uhci_fill_td(uhci->term_td, 0, (UHCI_NULL_DATA_SIZE << 21) |
		(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
	uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);	/* loops to itself */

	uhci->skel_term_qh->link = UHCI_PTR_TERM;
	uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);

	/*
	 * Fill the frame list: make all entries point to
	 * the proper interrupt queue.
	 *
	 * This is probably silly, but it's a simple way to
	 * scatter the interrupt queues in a way that gives
	 * us a reasonable dynamic range for irq latencies.
	 */
	for (i = 0; i < UHCI_NUMFRAMES; i++) {
		/* irq = number of consecutive low-order 1-bits in i, capped
		 * at 7; picks which interrupt skeleton QH serves this frame
		 * (NOTE(review): mapping 7-irq -> skelqh index assumed from
		 * the allocation order above — confirm against uhci-hcd.h) */
		int irq = 0;

		if (i & 1) {
			irq++;
			if (i & 2) {
				irq++;
				if (i & 4) {
					irq++;
					if (i & 8) {
						irq++;
						if (i & 16) {
							irq++;
							if (i & 32) {
								irq++;
								if (i & 64)
									irq++;
							}
						}
					}
				}
			}
		}

		/* Only place we don't use the frame list routines */
		uhci->fl->frame[i] = cpu_to_le32(uhci->skelqh[7 - irq]->dma_handle);
	}

	start_hc(uhci);

	init_stall_timer(hcd);

	udev->speed = USB_SPEED_FULL;

	if (usb_register_root_hub(udev, &hcd->pdev->dev) != 0) {
		err("unable to start root hub");
		retval = -ENOMEM;
		goto err_start_root_hub;
	}

	return 0;

/*
 * error exits:
 */
err_start_root_hub:
	reset_hc(uhci);
	del_timer_sync(&uhci->stall_timer);

err_alloc_skelqh:
	for (i = 0; i < UHCI_NUM_SKELQH; i++)
		if (uhci->skelqh[i]) {
			uhci_free_qh(uhci, uhci->skelqh[i]);
			uhci->skelqh[i] = NULL;
		}

	uhci_free_td(uhci, uhci->term_td);
	uhci->term_td = NULL;

err_alloc_term_td:
	usb_put_dev(udev);
	hcd->self.root_hub = NULL;

err_alloc_root_hub:
	pci_pool_destroy(uhci->qh_pool);
	uhci->qh_pool = NULL;

err_create_qh_pool:
	pci_pool_destroy(uhci->td_pool);
	uhci->td_pool = NULL;

err_create_td_pool:
	pci_free_consistent(hcd->pdev, sizeof(*uhci->fl), uhci->fl, uhci->fl->dma_handle);
	uhci->fl = NULL;

err_alloc_fl:
#ifdef CONFIG_PROC_FS
	remove_proc_entry(hcd->self.bus_name, uhci_proc_root);
	uhci->proc_entry = NULL;

err_create_proc_entry:
#endif

	return retval;
}
/*
 * Shut the controller down: stop the watchdog, flush every pending
 * unlink, reset the hardware, and release all memory.  The pending
 * lists are reaped a second time after the remove pass, since
 * uhci_remove_pending_qhs() can queue new entries.
 */
static void uhci_stop(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	del_timer_sync(&uhci->stall_timer);

	/*
	 * At this point, we're guaranteed that no new connects can be made
	 * to this bus since there are no more parents
	 */
	uhci_free_pending_qhs(uhci);
	uhci_free_pending_tds(uhci);
	uhci_remove_pending_qhs(uhci);

	reset_hc(uhci);

	/* second reap: pick up entries queued by the remove pass above */
	uhci_free_pending_qhs(uhci);
	uhci_free_pending_tds(uhci);

	release_uhci(uhci);
}
#ifdef CONFIG_PM |
/*
 * PM suspend hook.  Boards that can't suspend safely (see
 * suspend_allowed) get a full reset instead of a global suspend.
 */
static int uhci_suspend(struct usb_hcd *hcd, u32 state)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	if (!suspend_allowed(uhci))
		reset_hc(uhci);		/* broken motherboard: reset instead */
	else
		suspend_hc(uhci);
	return 0;
}
static int uhci_resume(struct usb_hcd *hcd) |
{ |
struct uhci_hcd *uhci = hcd_to_uhci(hcd); |
pci_set_master(uhci->hcd.pdev); |
if (uhci->state == UHCI_SUSPENDED) |
uhci->resume_detect = 1; |
else { |
reset_hc(uhci); |
start_hc(uhci); |
} |
uhci->hcd.state = USB_STATE_RUNNING; |
return 0; |
} |
#endif |
static struct usb_hcd *uhci_hcd_alloc(void) |
{ |
struct uhci_hcd *uhci; |
uhci = (struct uhci_hcd *)kmalloc(sizeof(*uhci), GFP_KERNEL); |
if (!uhci) |
return NULL; |
memset(uhci, 0, sizeof(*uhci)); |
uhci->hcd.product_desc = "UHCI Host Controller"; |
return &uhci->hcd; |
} |
/* Release the storage obtained by uhci_hcd_alloc() */
static void uhci_hcd_free(struct usb_hcd *hcd)
{
	kfree(hcd_to_uhci(hcd));
}
/* hc_driver glue: current frame number, for isochronous scheduling */
static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
{
	return uhci_get_current_frame_number(hcd_to_uhci(hcd));
}
static const char hcd_name[] = "uhci_hcd";

/* Operations table the generic HCD layer uses to drive this controller */
static const struct hc_driver uhci_driver = {
	.description =		hcd_name,

	/* Generic hardware linkage */
	.irq =			uhci_irq,
	.flags =		HCD_USB11,	/* full/low speed only */

	/* Basic lifecycle operations */
	.reset =		uhci_reset,
	.start =		uhci_start,
#ifdef CONFIG_PM
	.suspend =		uhci_suspend,
	.resume =		uhci_resume,
#endif
	.stop =			uhci_stop,

	.hcd_alloc =		uhci_hcd_alloc,
	.hcd_free =		uhci_hcd_free,

	.urb_enqueue =		uhci_urb_enqueue,
	.urb_dequeue =		uhci_urb_dequeue,

	.get_frame_number =	uhci_hcd_get_frame_number,

	.hub_status_data =	uhci_hub_status_data,
	.hub_control =		uhci_hub_control,
};
/* Match on PCI class code so every UHCI controller binds, regardless
 * of vendor/device ID; driver_data carries the hc_driver ops. */
static const struct pci_device_id uhci_pci_ids[] = { {
	/* handle any USB UHCI controller */
	PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x00), ~0),
	.driver_data =	(unsigned long) &uhci_driver,
	}, { /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
/* PCI glue: probe/remove/PM are handled by the generic usb_hcd_pci_*
 * helpers, which dispatch into uhci_driver via driver_data. */
static struct pci_driver uhci_pci_driver = {
	.name =		(char *)hcd_name,
	.id_table =	uhci_pci_ids,

	.probe =	usb_hcd_pci_probe,
	.remove =	usb_hcd_pci_remove,

#ifdef	CONFIG_PM
	.suspend =	usb_hcd_pci_suspend,
	.resume =	usb_hcd_pci_resume,
#endif	/* PM */
};
/*
 * Module init: allocate the debug buffer (only when debugging is on),
 * create /proc/driver/uhci, and register the PCI driver.  Errors
 * unwind through the labels in reverse order.
 *
 * Fixes: pass NULL (not 0) for the proc-entry parent pointer, and
 * drop the redundant `if (errbuf)` guard — kfree(NULL) is a no-op.
 */
/*static*/ int __init uhci_hcd_init(void)
{
	int retval = -ENOMEM;

	info(DRIVER_DESC " " DRIVER_VERSION);

	if (usb_disabled())
		return -ENODEV;

	if (debug) {
		errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
		if (!errbuf)
			goto errbuf_failed;
	}

#ifdef CONFIG_PROC_FS
	uhci_proc_root = create_proc_entry("driver/uhci", S_IFDIR, NULL);
	if (!uhci_proc_root)
		goto proc_failed;
#endif

	/* urb_priv slab cache deliberately disabled in this port */
	//** uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
	//** sizeof(struct urb_priv), 0, 0, NULL, NULL);
	//** if (!uhci_up_cachep)
	//** goto up_failed;

	retval = pci_module_init(&uhci_pci_driver);
	if (retval)
		goto init_failed;

	return 0;

init_failed:
	//** if (kmem_cache_destroy(uhci_up_cachep))
	//** printk(KERN_INFO "uhci: not all urb_priv's were freed\n");

up_failed:
#ifdef CONFIG_PROC_FS
	remove_proc_entry("driver/uhci", NULL);

proc_failed:
#endif
	kfree(errbuf);	/* kfree(NULL) is safe */

errbuf_failed:

	return retval;
}
/*
 * Module exit: unregister the PCI driver and release the init-time
 * allocations (proc directory, debug buffer).
 *
 * Fixes: pass NULL (not 0) for the proc parent pointer, and drop the
 * redundant `if (errbuf)` guard — kfree(NULL) is a no-op.
 */
/*static*/ void __exit uhci_hcd_cleanup(void)
{
	pci_unregister_driver(&uhci_pci_driver);

	//** if (kmem_cache_destroy(uhci_up_cachep))
	//** printk(KERN_INFO "uhci: not all urb_priv's were freed\n");

#ifdef CONFIG_PROC_FS
	remove_proc_entry("driver/uhci", NULL);
#endif

	kfree(errbuf);	/* kfree(NULL) is safe */
}
/* Module entry/exit points and metadata */
module_init(uhci_hcd_init);
module_exit(uhci_hcd_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/* |
* Universal Host Controller Interface driver for USB. |
* |
* Maintainer: Johannes Erdfelt <johannes@erdfelt.com> |
* |
* (C) Copyright 1999 Linus Torvalds |
* (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com |
* (C) Copyright 1999 Randy Dunlap |
* (C) Copyright 1999 Georg Acher, acher@in.tum.de |
* (C) Copyright 1999 Deti Fliegl, deti@fliegl.de |
* (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch |
* (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at |
* (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface |
* support from usb-ohci.c by Adam Richter, adam@yggdrasil.com). |
* (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c) |
* |
* Intel documents this fairly well, and as far as I know there |
* are no royalties or anything like that, but even so there are |
* people who decided that they want to do the same thing in a |
* completely different way. |
* |
* WARNING! The USB documentation is downright evil. Most of it |
* is just crap, written by a committee. You're better off ignoring |
* most of it, the important stuff is: |
* - the low-level protocol (fairly simple but lots of small details) |
* - working around the horridness of the rest |
*/ |
#include <linuxcomp.h> |
#include <linux/config.h> |
#include <linux/module.h> |
#include <linux/pci.h> |
#include <linux/kernel.h> |
#include <linux/init.h> |
#include <linux/delay.h> |
#include <linux/ioport.h> |
#include <linux/sched.h> |
#include <linux/slab.h> |
#include <linux/smp_lock.h> |
#include <linux/errno.h> |
#include <linux/unistd.h> |
#include <linux/interrupt.h> |
#include <linux/spinlock.h> |
#include <linux/proc_fs.h> |
#ifdef CONFIG_USB_DEBUG |
#define DEBUG |
#else |
#undef DEBUG |
#endif |
#include <linux/usb.h> |
#include <asm/uaccess.h> |
#include <asm/io.h> |
#include <asm/irq.h> |
#include <asm/system.h> |
#include "../core/hcd.h" |
#include "uhci-hcd.h" |
#include <linux/pm.h> |
/* |
* Version Information |
*/ |
#define DRIVER_VERSION "v2.1" |
#define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber" |
#define DRIVER_DESC "USB Universal Host Controller Interface driver" |
/* |
* debug = 0, no debugging messages |
* debug = 1, dump failed URB's except for stalls |
* debug = 2, dump all failed URB's (including stalls) |
* show all queues in /proc/driver/uhci/[pci_addr] |
* debug = 3, show all TD's in URB's when dumping |
*/ |
/* Default debug level: maximum when built with CONFIG_USB_DEBUG */
#ifdef DEBUG
static int debug = 3;
#else
static int debug = 0;
#endif
MODULE_PARM(debug, "i");
MODULE_PARM_DESC(debug, "Debug level");
/* Scratch buffer for dumping failed URBs; allocated in uhci_hcd_init()
 * only when debug is nonzero */
static char *errbuf;
#define ERRBUF_LEN    (PAGE_SIZE * 8)
#include "uhci-hub.c" |
#include "uhci-debug.c" |
static kmem_cache_t *uhci_up_cachep; /* urb_priv */ |
static int uhci_get_current_frame_number(struct uhci_hcd *uhci); |
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb); |
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb); |
static void hc_state_transitions(struct uhci_hcd *uhci); |
/* If a transfer is still active after this much time, turn off FSBR */ |
#define IDLE_TIMEOUT (HZ / 20) /* 50 ms */ |
#define FSBR_DELAY (HZ / 20) /* 50 ms */ |
/* When we timeout an idle transfer for FSBR, we'll switch it over to */ |
/* depth first traversal. We'll do it in groups of this number of TD's */ |
/* to make sure it doesn't hog all of the bandwidth */ |
#define DEPTH_INTERVAL 5 |
/* |
* Technically, updating td->status here is a race, but it's not really a |
* problem. The worst that can happen is that we set the IOC bit again |
* generating a spurious interrupt. We could fix this by creating another |
* QH and leaving the IOC bit always set, but then we would have to play |
* games with the FSBR code to make sure we get the correct order in all |
* the cases. I don't think it's worth the effort |
*/ |
/* Set IOC on the terminating TD so the HC interrupts us next frame */
static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	unsigned long flags;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/* Clear the IOC request armed by uhci_set_next_interrupt() */
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	unsigned long flags;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/* Queue an URB for uhci_finish_completion() to give back later */
static inline void uhci_add_complete(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	unsigned long flags;

	spin_lock_irqsave(&uhci->complete_list_lock, flags);
	list_add_tail(&urbp->complete_list, &uhci->complete_list);
	spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
}
static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci, struct usb_device *dev) |
{ |
dma_addr_t dma_handle; |
struct uhci_td *td; |
td = pci_pool_alloc_usb(uhci->td_pool, GFP_ATOMIC, &dma_handle); |
if (!td) |
return NULL; |
td->dma_handle = dma_handle; |
td->link = UHCI_PTR_TERM; |
td->buffer = 0; |
td->frame = -1; |
td->dev = dev; |
INIT_LIST_HEAD(&td->list); |
INIT_LIST_HEAD(&td->remove_list); |
INIT_LIST_HEAD(&td->fl_list); |
usb_get_dev(dev); |
return td; |
} |
/* Fill a TD's three hardware-visible words, converting each to the
 * little-endian format the controller expects */
static inline void uhci_fill_td(struct uhci_td *td, __u32 status,
		__u32 token, __u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}
/*
 * We insert Isochronous URB's directly into the frame list at the beginning
 *
 * The mb() barriers order the writes so the HC always sees a fully
 * linked TD: its link pointer is published before the frame (or the
 * previous TD) is pointed at it.
 */
static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
{
	unsigned long flags;

	framenum %= UHCI_NUMFRAMES;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->fl->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		/* append after the last TD already in this frame */
		ftd = uhci->fl->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		mb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		/* first TD in this frame: splice ahead of the old entry */
		td->link = uhci->fl->frame[framenum];
		mb();
		uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->fl->frame_cpu[framenum] = td;
	}

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
 * Unlink a TD from the frame list (the inverse of
 * uhci_insert_td_frame_list).  Three cases: the TD heads its frame
 * and is alone, heads it with successors, or sits mid-chain.
 */
static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	unsigned long flags;

	/* If it's not inserted, don't remove it */
	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	if (td->frame == -1 && list_empty(&td->fl_list))
		goto out;

	if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
		/* TD is the head of its frame's chain */
		if (list_empty(&td->fl_list)) {
			/* ... and the only entry: terminate the frame */
			uhci->fl->frame[td->frame] = td->link;
			uhci->fl->frame_cpu[td->frame] = NULL;
		} else {
			/* ... promote the next TD to frame head */
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->fl->frame_cpu[td->frame] = ntd;
		}
	} else {
		/* mid-chain: make the predecessor skip over us */
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	mb();	/* publish the unlink before clobbering td->link */
	td->link = UHCI_PTR_TERM;

	list_del_init(&td->fl_list);
	td->frame = -1;

out:
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/* |
* Inserts a td into qh list at the top. |
*/ |
static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, u32 breadth) |
{ |
struct list_head *tmp, *head; |
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; |
struct uhci_td *td, *ptd; |
if (list_empty(&urbp->td_list)) |
return; |
head = &urbp->td_list; |
tmp = head->next; |
/* Ordering isn't important here yet since the QH hasn't been */ |
/* inserted into the schedule yet */ |
td = list_entry(tmp, struct uhci_td, list); |
/* Add the first TD to the QH element pointer */ |
qh->element = cpu_to_le32(td->dma_handle) | breadth; |
ptd = td; |
/* Then link the rest of the TD's */ |
tmp = tmp->next; |
while (tmp != head) { |
td = list_entry(tmp, struct uhci_td, list); |
tmp = tmp->next; |
ptd->link = cpu_to_le32(td->dma_handle) | breadth; |
ptd = td; |
} |
ptd->link = UHCI_PTR_TERM; |
} |
/*
 * Return a TD to the DMA pool, dropping the device reference taken at
 * allocation.  The sanity checks flag TDs that are still linked
 * somewhere -- freeing one of those would leave dangling pointers.
 */
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dbg("td %p is still in list!", td);
	if (!list_empty(&td->remove_list))
		dbg("td %p still in remove_list!", td);
	if (!list_empty(&td->fl_list))
		dbg("td %p is still in fl_list!", td);

	if (td->dev)
		usb_put_dev(td->dev);

	pci_pool_free(uhci->td_pool, td, td->dma_handle);
}
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *dev) |
{ |
dma_addr_t dma_handle; |
struct uhci_qh *qh; |
qh = pci_pool_alloc_usb(uhci->qh_pool, GFP_ATOMIC, &dma_handle); |
if (!qh) |
return NULL; |
qh->dma_handle = dma_handle; |
qh->element = UHCI_PTR_TERM; |
qh->link = UHCI_PTR_TERM; |
qh->dev = dev; |
qh->urbp = NULL; |
INIT_LIST_HEAD(&qh->list); |
INIT_LIST_HEAD(&qh->remove_list); |
usb_get_dev(dev); |
return qh; |
} |
/*
 * Return a QH to the DMA pool, dropping the device reference taken at
 * allocation.  A QH still on a list here indicates an unlink bug.
 */
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (!list_empty(&qh->list))
		dbg("qh %p list not empty!", qh);
	if (!list_empty(&qh->remove_list))
		dbg("qh %p still in remove_list!", qh);

	if (qh->dev)
		usb_put_dev(qh->dev);

	pci_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
/*
 * Append this urb's qh after the last qh in skelqh->list
 * MUST be called with uhci->frame_list_lock acquired
 *
 * Note that urb_priv.queue_list doesn't have a separate queue head;
 * it's a ring with every element "live".
 *
 * The write order below is what keeps the hardware consistent: the
 * new QH (and its queue) is pointed at the schedule *before* anything
 * in the schedule is pointed at the new QH.
 */
static void _uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct list_head *tmp;
	struct uhci_qh *lqh;

	/* Grab the last QH */
	lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);

	/*
	 * Patch this endpoint's URB's QHs to point to the next skelqh:
	 *    skelqh --> ... lqh --> newqh --> next skelqh
	 * Do this first, so the HC always sees the right QH after this one.
	 */
	list_for_each (tmp, &urbp->queue_list) {
		struct urb_priv *turbp =
			list_entry(tmp, struct urb_priv, queue_list);

		turbp->qh->link = lqh->link;
	}
	urbp->qh->link = lqh->link;
	wmb();				/* Ordering is important */

	/*
	 * Patch QHs for previous endpoint's queued URBs?  HC goes
	 * here next, not to the next skelqh it now points to.
	 *
	 *    lqh --> td ... --> qh ... --> td --> qh ... --> td
	 *     |                 |                 |
	 *     v                 v                 v
	 *     +<----------------+-----------------+
	 *     v
	 *    newqh --> td ... --> td
	 *     |
	 *     v
	 *    ...
	 *
	 * The HC could see (and use!) any of these as we write them.
	 */
	if (lqh->urbp) {
		list_for_each (tmp, &lqh->urbp->queue_list) {
			struct urb_priv *turbp =
				list_entry(tmp, struct urb_priv, queue_list);

			turbp->qh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
		}
	}
	lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

	list_add_tail(&urbp->qh->list, &skelqh->list);
}
/* Locked wrapper around _uhci_insert_qh() */
static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
	unsigned long flags;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	_uhci_insert_qh(uhci, skelqh, urb);
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
 * Start removal of QH from schedule; it finishes next frame.
 * TDs should be unlinked before this is called.
 *
 * The QH is only unhooked here; actual freeing is deferred to the
 * qh_remove_list so the HC is guaranteed past it first.
 */
static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	unsigned long flags;
	struct uhci_qh *pqh;

	if (!qh)
		return;

	qh->urbp = NULL;

	/*
	 * Only go through the hoops if it's actually linked in
	 * Queued QHs are removed in uhci_delete_queued_urb,
	 * since (for queued URBs) the pqh is pointed to the next
	 * QH in the queue, not the next endpoint's QH.
	 */
	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	if (!list_empty(&qh->list)) {
		pqh = list_entry(qh->list.prev, struct uhci_qh, list);

		/* Make the predecessor's queued URBs skip over us too */
		if (pqh->urbp) {
			struct list_head *head, *tmp;

			head = &pqh->urbp->queue_list;
			tmp = head->next;
			while (head != tmp) {
				struct urb_priv *turbp =
					list_entry(tmp, struct urb_priv, queue_list);

				tmp = tmp->next;

				turbp->qh->link = qh->link;
			}
		}

		pqh->link = qh->link;
		mb();
		/* Leave qh->link in case the HC is on the QH now, it will */
		/* continue the rest of the schedule */
		qh->element = UHCI_PTR_TERM;

		list_del_init(&qh->list);
	}
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);

	spin_lock_irqsave(&uhci->qh_remove_list_lock, flags);

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the QH */
	if (list_empty(&uhci->qh_remove_list))
		uhci_set_next_interrupt(uhci);

	list_add(&qh->remove_list, &uhci->qh_remove_list);

	spin_unlock_irqrestore(&uhci->qh_remove_list_lock, flags);
}
static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle) |
{ |
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; |
struct list_head *head, *tmp; |
head = &urbp->td_list; |
tmp = head->next; |
while (head != tmp) { |
struct uhci_td *td = list_entry(tmp, struct uhci_td, list); |
tmp = tmp->next; |
if (toggle) |
td->token |= cpu_to_le32(TD_TOKEN_TOGGLE); |
else |
td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE); |
toggle ^= 1; |
} |
return toggle; |
} |
/* This function will append one URB's QH to another URB's QH. This is for */
/* queuing interrupt, control or bulk transfers */
static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
{
	struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
	struct list_head *tmp;
	struct uhci_td *lltd;
	unsigned long flags;

	eurbp = eurb->hcpriv;
	urbp = urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	/* Find the first URB in the queue */
	if (eurbp->queued) {
		/* eurb is itself queued: walk forward to the queue head
		 * (the one entry with queued == 0) */
		struct list_head *head = &eurbp->queue_list;

		tmp = head->next;
		while (tmp != head) {
			struct urb_priv *turbp =
				list_entry(tmp, struct urb_priv, queue_list);

			if (!turbp->queued)
				break;

			tmp = tmp->next;
		}
	} else
		tmp = &eurbp->queue_list;

	/* furbp = first (head) URB of the queue, lurbp = current last URB */
	furbp = list_entry(tmp, struct urb_priv, queue_list);
	lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);

	/* lltd = last TD of the current last URB; our QH hangs off its link */
	lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);

	/* Continue the data toggle from where the last TD left off */
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe),
		uhci_fixup_toggle(urb, uhci_toggle(td_token(lltd)) ^ 1));

	/* All qh's in the queue need to link to the next queue */
	urbp->qh->link = eurbp->qh->link;

	mb();	/* Make sure we flush everything */
	/* Only now make the new QH reachable by the HC */
	lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

	list_add_tail(&urbp->queue_list, &furbp->queue_list);

	urbp->queued = 1;

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
 * Remove one URB from a queue of URBs sharing an endpoint.
 *
 * Two cases: if the URB is the queue head (!urbp->queued) the next URB
 * is promoted to head and the schedule's QH links are re-patched to it;
 * if the URB is in the middle/end, only the previous URB's last TD link
 * needs fixing.  The endpoint's data toggle is recomputed either way.
 */
static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp, *nurbp;
	struct list_head *head, *tmp;
	struct urb_priv *purbp;
	struct uhci_td *pltd;
	unsigned int toggle;
	unsigned long flags;

	urbp = urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	/* Not part of a queue at all: nothing to unsplice */
	if (list_empty(&urbp->queue_list))
		goto out;

	nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);

	/* Fix up the toggle for the next URB's */
	if (!urbp->queued)
		/* We just set the toggle in uhci_unlink_generic */
		toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
	else {
		/* If we're in the middle of the queue, grab the toggle */
		/* from the TD previous to us */
		purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
				queue_list);

		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);

		toggle = uhci_toggle(td_token(pltd)) ^ 1;
	}

	/* Re-number the toggles of every still-queued URB after us */
	head = &urbp->queue_list;
	tmp = head->next;
	while (head != tmp) {
		struct urb_priv *turbp;

		turbp = list_entry(tmp, struct urb_priv, queue_list);

		tmp = tmp->next;

		if (!turbp->queued)
			break;

		toggle = uhci_fixup_toggle(turbp->urb, toggle);
	}

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
		usb_pipeout(urb->pipe), toggle);

	if (!urbp->queued) {
		/* We are the queue head: promote nurbp to head */
		struct uhci_qh *pqh;

		nurbp->queued = 0;

		/*
		 * Fixup the previous QH's queue to link to the new head
		 * of this queue.
		 */
		pqh = list_entry(urbp->qh->list.prev, struct uhci_qh, list);

		if (pqh->urbp) {
			struct list_head *head, *tmp;

			head = &pqh->urbp->queue_list;
			tmp = head->next;

			while (head != tmp) {
				struct urb_priv *turbp =
					list_entry(tmp, struct urb_priv, queue_list);

				tmp = tmp->next;

				turbp->qh->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
			}
		}

		pqh->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;

		list_add_tail(&nurbp->qh->list, &urbp->qh->list);
		list_del_init(&urbp->qh->list);
	} else {
		/* We're somewhere in the middle (or end). A bit trickier */
		/* than the head scenario */
		purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
				queue_list);

		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
		if (nurbp->queued)
			pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		else
			/* The next URB happens to be the beginning, so */
			/* we're the last, end the chain */
			pltd->link = UHCI_PTR_TERM;
	}

	list_del_init(&urbp->queue_list);

out:
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/* NOTE(review): nonstandard prototype — ISO C declares malloc(size_t).
 * Presumably this matches the host RTOS allocator; confirm against its libc
 * header rather than redeclaring here. */
extern void* malloc(int size);
static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb) |
{ |
struct urb_priv *urbp; |
urbp = malloc(sizeof(struct urb_priv)); //**kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC); |
if (!urbp) { |
err("uhci_alloc_urb_priv: couldn't allocate memory for urb_priv\n"); |
return NULL; |
} |
memset((void *)urbp, 0, sizeof(*urbp)); |
urbp->inserttime = jiffies26; |
urbp->fsbrtime = jiffies26; |
urbp->urb = urb; |
urbp->dev = urb->dev; |
INIT_LIST_HEAD(&urbp->td_list); |
INIT_LIST_HEAD(&urbp->queue_list); |
INIT_LIST_HEAD(&urbp->complete_list); |
INIT_LIST_HEAD(&urbp->urb_list); |
list_add_tail(&urbp->urb_list, &uhci->urb_list); |
urb->hcpriv = urbp; |
return urbp; |
} |
/* |
* MUST be called with urb->lock acquired |
*/ |
static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td) |
{ |
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; |
td->urb = urb; |
list_add_tail(&td->list, &urbp->td_list); |
} |
/* |
* MUST be called with urb->lock acquired |
*/ |
/* Detach a TD from its URB's TD list; no-op if it was never linked
 * (or was already detached). */
static void uhci_remove_td_from_urb(struct uhci_td *td)
{
	if (list_empty(&td->list))
		return;

	list_del_init(&td->list);

	td->urb = NULL;
}
/* |
* MUST be called with urb->lock acquired |
*/ |
/*
 * Tear down a URB's private state: move all its TDs onto the deferred
 * td_remove_list (freed later by the interrupt handler, after the HC has
 * moved past them) and free the urb_priv itself.
 */
static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *head, *tmp;
	struct urb_priv *urbp;
	unsigned long flags;

	urbp = (struct urb_priv *)urb->hcpriv;
	if (!urbp)
		return;

	/* Caller should have unlinked the URB from both lists already */
	if (!list_empty(&urbp->urb_list))
		warn("uhci_destroy_urb_priv: urb %p still on uhci->urb_list or uhci->remove_list", urb);

	if (!list_empty(&urbp->complete_list))
		warn("uhci_destroy_urb_priv: urb %p still on uhci->complete_list", urb);

	spin_lock_irqsave(&uhci->td_remove_list_lock, flags);

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the TD's*/
	if (list_empty(&uhci->td_remove_list))
		uhci_set_next_interrupt(uhci);

	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		uhci_remove_td_from_urb(td);
		uhci_remove_td(uhci, td);
		list_add(&td->remove_list, &uhci->td_remove_list);
	}

	spin_unlock_irqrestore(&uhci->td_remove_list_lock, flags);

	urb->hcpriv = NULL;

	/* was: kmem_cache_free(uhci_up_cachep, urbp); */
	free(urbp);
}
/*
 * Turn on Full Speed Bandwidth Reclamation for this URB (unless the URB
 * opted out via URB_NO_FSBR).  The controller-wide fsbr count is bumped;
 * on the 0 -> 1 transition (and when no disable timeout is pending) the
 * terminating QH is looped back to the HS control QH so the HC keeps
 * polling the schedule for the rest of the frame.
 */
static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	unsigned long flags;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
		urbp->fsbr = 1;
		/* note: uhci->fsbr is incremented unconditionally here;
		 * the link is only written on the 0 -> 1 transition */
		if (!uhci->fsbr++ && !uhci->fsbrtimeout)
			uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_hs_control_qh->dma_handle) | UHCI_PTR_QH;
	}

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
 * Drop this URB's FSBR claim.  When the controller-wide count reaches
 * zero, FSBR is not switched off immediately; a timeout is armed and
 * stall_callback() later terminates the loop (avoids thrashing the
 * schedule when FSBR toggles rapidly).
 */
static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	unsigned long flags;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
		urbp->fsbr = 0;
		if (!--uhci->fsbr)
			uhci->fsbrtimeout = jiffies26 + FSBR_DELAY;
	}

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/* |
* Map status to standard result codes |
* |
* <status> is (td->status & 0xFE0000) [a.k.a. uhci_status_bits(td->status)] |
* <dir_out> is True for output TDs and False for input TDs. |
*/ |
static int uhci_map_status(int status, int dir_out) |
{ |
if (!status) |
return 0; |
if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */ |
return -EPROTO; |
if (status & TD_CTRL_CRCTIMEO) { /* CRC/Timeout */ |
if (dir_out) |
return -ETIMEDOUT; |
else |
return -EILSEQ; |
} |
if (status & TD_CTRL_NAK) /* NAK */ |
return -ETIMEDOUT; |
if (status & TD_CTRL_BABBLE) /* Babble */ |
return -EOVERFLOW; |
if (status & TD_CTRL_DBUFERR) /* Buffer error */ |
return -ENOSR; |
if (status & TD_CTRL_STALLED) /* Stalled */ |
return -EPIPE; |
if (status & TD_CTRL_ACTIVE) /* Active */ |
return 0; |
return -EINVAL; |
} |
/*
 * Control transfers
 *
 * Builds the SETUP TD, the DATA stage TDs (alternating toggle, starting
 * with DATA1), and the final status TD (opposite direction, DATA1, IOC),
 * hangs them all off a freshly-allocated QH, and links the QH into the
 * appropriate skeleton (or appends behind an existing URB on the same
 * endpoint).  Returns -EINPROGRESS on success.
 *
 * On -ENOMEM mid-way, TDs already attached to urbp->td_list are reclaimed
 * by the caller via uhci_destroy_urb_priv() (see uhci_urb_enqueue).
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;
	struct uhci_qh *qh, *skelqh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors */
	status = TD_CTRL_ACTIVE | uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	uhci_add_td_to_urb(urb, td);
	/* SETUP packets are always 8 bytes; explen encodes length - 1 */
	uhci_fill_td(td, status, destination | uhci_explen(7),
		urb->setup_dma);

	/*
	 * If direction is "send", change the frame from SETUP (0x2D)
	 * to OUT (0xE1). Else change it from SETUP to IN (0x69).
	 */
	destination ^= (USB_PID_SETUP ^ usb_packetid(urb->pipe));

	if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TD's
	 */
	while (len > 0) {
		int pktsze = len;

		if (pktsze > maxsze)
			pktsze = maxsze;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
			data);

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	/* Never short-packet-detect the status stage; interrupt on complete */
	status &= ~TD_CTRL_SPD;

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status | TD_CTRL_IOC,
		destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	/* Low speed transfers get a different queue, and won't hog the bus */
	if (urb->dev->speed == USB_SPEED_LOW)
		skelqh = uhci->skel_ls_control_qh;
	else {
		skelqh = uhci->skel_hs_control_qh;
		uhci_inc_fsbr(uhci, urb);
	}

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
/*
 * If control was short, then end status packet wasn't sent, so this
 * reorganize s so it's sent to finish the transfer. The original QH is
 * removed from the skel and discarded; all TDs except the last (status)
 * are deleted; the last (status) TD is put on a new QH which is reinserted
 * into the skel. Since the last TD and urb_priv are reused, the TD->link
 * and urb_priv maintain any queued QHs.
 */
static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	/* Marks the URB so uhci_result_control() jumps straight to the
	 * status-phase check next time around */
	urbp->short_control_packet = 1;

	/* Create a new QH to avoid pointer overwriting problems */
	uhci_remove_qh(uhci, urbp->qh);

	/* Delete all of the TD's except for the status TD at the end */
	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head && tmp->next != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		uhci_remove_td_from_urb(td);
		uhci_remove_td(uhci, td);
		uhci_free_td(uhci, td);
	}

	urbp->qh = uhci_alloc_qh(uhci, urb->dev);
	if (!urbp->qh) {
		err("unable to allocate new QH for control retrigger");
		return -ENOMEM;
	}

	urbp->qh->urbp = urbp;

	/* One TD, who cares about Breadth first? */
	uhci_insert_tds_in_qh(urbp->qh, urb, UHCI_PTR_DEPTH);

	/* Low speed transfers get a different queue */
	if (urb->dev->speed == USB_SPEED_LOW)
		uhci_insert_qh(uhci, uhci->skel_ls_control_qh, urb);
	else
		uhci_insert_qh(uhci, uhci->skel_hs_control_qh, urb);

	return -EINPROGRESS;
}
/*
 * Check the progress of a control transfer: walk the SETUP TD, the data
 * TDs, then the status TD.  Returns -EINPROGRESS while any TD is still
 * active, 0 on success, a mapped error code on failure, or retriggers
 * the status stage after a short (but acceptable) IN data stage.
 */
static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status;
	int ret = 0;

	if (list_empty(&urbp->td_list))
		return -EINVAL;

	head = &urbp->td_list;

	/* After a retrigger only the status TD (at the tail) remains of
	 * interest — skip straight to it */
	if (urbp->short_control_packet) {
		tmp = head->prev;
		goto status_phase;
	}

	tmp = head->next;
	td = list_entry(tmp, struct uhci_td, list);

	/* The first TD is the SETUP phase, check the status, but skip */
	/* the count */
	status = uhci_status_bits(td_status(td));
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	urb->actual_length = 0;

	/* The rest of the TD's (but the last) are data */
	tmp = tmp->next;
	while (tmp != head && tmp->next != head) {
		td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		status = uhci_status_bits(td_status(td));
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(td_status(td));

		if (status)
			goto td_error;

		/* Check to see if we received a short packet */
		if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			}

			/* Short IN: device is done sending; go finish the
			 * transfer by sending the status packet */
			if (uhci_packetid(td_token(td)) == USB_PID_IN)
				return usb_control_retrigger_status(uhci, urb);
			else
				return 0;
		}
	}

status_phase:
	td = list_entry(tmp, struct uhci_td, list);

	/* Control status phase */
	status = td_status(td);

#ifdef I_HAVE_BUGGY_APC_BACKUPS
	/* APC BackUPS Pro kludge */
	/* It tries to send all of the descriptor instead of the amount */
	/* we requested */
	if (status & TD_CTRL_IOC &&	/* IOC is masked out by uhci_status_bits */
	    status & TD_CTRL_ACTIVE &&
	    status & TD_CTRL_NAK)
		return 0;
#endif

	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (uhci_status_bits(status))
		goto td_error;

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dbg("uhci_result_control() failed with status %x", status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}

	return ret;
}
/*
 * Common submit for bulk and interrupt
 *
 * Chops the transfer buffer into maxpacket-sized TDs (toggling the data
 * toggle per packet), optionally appends a zero-length OUT packet for
 * URB_ZERO_PACKET, sets IOC on the last TD, and links the QH into the
 * given skeleton (or appends behind an existing URB on the endpoint).
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
{
	struct uhci_td *td;
	struct uhci_qh *qh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	dma_addr_t data = urb->transfer_dma;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TD's
	 */
	do {	/* Allow zero length packets */
		int pktsze = len;

		if (pktsze > maxsze)
			pktsze = maxsze;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		data += pktsze;
		/* deliberately maxsze (not pktsze): len may go negative on the
		 * final partial packet, which terminates the loop */
		len -= maxsze;

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
	    !len && urb->transfer_buffer_length) {
		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	}

	/* Set the flag on the last packet */
	td->status |= cpu_to_le32(TD_CTRL_IOC);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	/* Always breadth first */
	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
/*
 * Common result for bulk and interrupt
 *
 * Accumulates actual_length over the TD chain.  Returns -EINPROGRESS
 * while TDs remain active, 0 on success or on an acceptable short
 * packet, and a mapped error (marking the endpoint halted on -EPIPE)
 * otherwise.
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status = 0;
	int ret = 0;

	urb->actual_length = 0;

	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		status = uhci_status_bits(td_status(td));
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(td_status(td));

		if (status)
			goto td_error;

		/* Short packet: error only if the URB demanded full length */
		if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			} else
				return 0;
		}
	}

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));
	if (ret == -EPIPE)
		/* endpoint has stalled - mark it halted */
		usb_endpoint_halt(urb->dev, uhci_endpoint(td_token(td)),
	    			uhci_packetout(td_token(td)));

err:
	/*
	 * Enable this chunk of code if you want to see some more debugging.
	 * But be careful, it has the tendancy to starve out khubd and prevent
	 * disconnects from happening successfully if you have a slow debug
	 * log interface (like a serial console.
	 */
#if 0
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dbg("uhci_result_common() failed with status %x", status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}
#endif
	return ret;
}
static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb) |
{ |
int ret; |
/* Can't have low speed bulk transfers */ |
if (urb->dev->speed == USB_SPEED_LOW) |
return -EINVAL; |
ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh); |
if (ret == -EINPROGRESS) |
uhci_inc_fsbr(uhci, urb); |
return ret; |
} |
static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb) |
{ |
/* USB 1.1 interrupt transfers only involve one packet per interval; |
* that's the uhci_submit_common() "breadth first" policy. Drivers |
* can submit urbs of any length, but longer ones might need many |
* intervals to complete. |
*/ |
return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]); |
} |
/* |
* Bulk and interrupt use common result |
*/ |
#define uhci_result_bulk uhci_result_common |
#define uhci_result_interrupt uhci_result_common |
/* |
* Isochronous transfers |
*/ |
static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end) |
{ |
struct urb *last_urb = NULL; |
struct list_head *tmp, *head; |
int ret = 0; |
head = &uhci->urb_list; |
tmp = head->next; |
while (tmp != head) { |
struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list); |
struct urb *u = up->urb; |
tmp = tmp->next; |
/* look for pending URB's with identical pipe handle */ |
if ((urb->pipe == u->pipe) && (urb->dev == u->dev) && |
(u->status == -EINPROGRESS) && (u != urb)) { |
if (!last_urb) |
*start = u->start_frame; |
last_urb = u; |
} |
} |
if (last_urb) { |
*end = (last_urb->start_frame + last_urb->number_of_packets * |
last_urb->interval) & (UHCI_NUMFRAMES-1); |
ret = 0; |
} else |
ret = -1; /* no previous urb found */ |
return ret; |
} |
/*
 * Choose the starting frame for an isoc URB.  With URB_ISO_ASAP the URB
 * is scheduled right after the last pending URB on the pipe, or a few
 * frames ahead of the current frame if there is none; otherwise the
 * caller-supplied start_frame is used (wrapped to the frame list size).
 */
static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
{
	int limits;
	unsigned int start = 0, end = 0;

	if (urb->number_of_packets > 900)	/* 900? Why? */
		return -EFBIG;

	limits = isochronous_find_limits(uhci, urb, &start, &end);

	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (limits) {
			/* No pending URB on this pipe: start a little ahead
			 * of the current frame (10 frames of slack is a
			 * driver heuristic — presumably to beat the HC's
			 * progress through the frame list) */
			int curframe;

			curframe = uhci_get_current_frame_number(uhci) % UHCI_NUMFRAMES;
			urb->start_frame = (curframe + 10) % UHCI_NUMFRAMES;
		} else
			urb->start_frame = end;
	} else {
		urb->start_frame %= UHCI_NUMFRAMES;
		/* FIXME: Sanity check */
	}

	return 0;
}
/*
 * Isochronous transfers
 *
 * One TD per (non-empty) frame descriptor, inserted directly into the
 * frame list at consecutive frames; the last TD raises IOC.  Frames with
 * a zero-length descriptor get no TD at all.
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	int i, ret, frame;
	int status, destination;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	ret = isochronous_find_start(uhci, urb);
	if (ret)
		return ret;

	frame = urb->start_frame;
	for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
		/* zero-length frames produce no TD (see result-side note) */
		if (!urb->iso_frame_desc[i].length)
			continue;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
			urb->transfer_dma + urb->iso_frame_desc[i].offset);

		if (i + 1 >= urb->number_of_packets)
			td->status |= cpu_to_le32(TD_CTRL_IOC);

		uhci_insert_td_frame_list(uhci, td, frame);
	}

	return -EINPROGRESS;
}
/*
 * Collect per-frame results for an isoc URB.  Returns -EINPROGRESS while
 * any TD is still active, otherwise the status of the last failing frame
 * (0 if none failed).
 *
 * NOTE(review): uhci_submit_isochronous() creates no TD for zero-length
 * frame descriptors, but `i` here advances once per TD — so after a
 * zero-length frame the iso_frame_desc[] indices drift off by one.
 * Confirm whether callers ever submit zero-length isoc frames.
 */
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int status;
	int i, ret = 0;

	urb->actual_length = 0;

	i = 0;
	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
		int actlength;

		tmp = tmp->next;

		if (td_status(td) & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		actlength = uhci_actual_length(td_status(td));
		urb->iso_frame_desc[i].actual_length = actlength;
		urb->actual_length += actlength;

		status = uhci_map_status(uhci_status_bits(td_status(td)), usb_pipeout(urb->pipe));
		urb->iso_frame_desc[i].status = status;
		if (status) {
			/* per-frame errors don't fail the whole URB, but are
			 * counted and the last one is returned */
			urb->error_count++;
			ret = status;
		}

		i++;
	}

	return ret;
}
/* |
* MUST be called with uhci->urb_list_lock acquired |
*/ |
static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb) |
{ |
struct list_head *tmp, *head; |
/* We don't match Isoc transfers since they are special */ |
if (usb_pipeisoc(urb->pipe)) |
return NULL; |
head = &uhci->urb_list; |
tmp = head->next; |
while (tmp != head) { |
struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list); |
struct urb *u = up->urb; |
tmp = tmp->next; |
if (u->dev == urb->dev && u->status == -EINPROGRESS) { |
/* For control, ignore the direction */ |
if (usb_pipecontrol(urb->pipe) && |
(u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN)) |
return u; |
else if (u->pipe == urb->pipe) |
return u; |
} |
} |
return NULL; |
} |
/*
 * HCD entry point: queue a URB for transfer.  Allocates the urb_priv,
 * dispatches to the per-type submit routine (queueing behind an existing
 * URB on the same endpoint where possible), and handles bandwidth
 * claiming for periodic transfers.  Returns 0 on success, negative errno
 * on failure (the urb_priv is torn down on the failure path).
 */
static int uhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, int mem_flags)
{
	int ret = -EINVAL;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb *eurb;
	int bustime;

	spin_lock_irqsave(&uhci->urb_list_lock, flags);

	eurb = uhci_find_urb_ep(uhci, urb);

	if (!uhci_alloc_urb_priv(uhci, urb)) {
		spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
		return -ENOMEM;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_submit_control(uhci, urb, eurb);
		break;
	case PIPE_INTERRUPT:
		if (!eurb) {
			/* First URB on the endpoint: claim bus bandwidth */
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, eurb);
				if (ret == -EINPROGRESS)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			urb->bandwidth = eurb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, eurb);
		}
		break;
	case PIPE_BULK:
		ret = uhci_submit_bulk(uhci, urb, eurb);
		break;
	case PIPE_ISOCHRONOUS:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb);
		if (ret == -EINPROGRESS)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}

	if (ret != -EINPROGRESS) {
		/* Submit failed, so delete it from the urb_list */
		struct urb_priv *urbp = urb->hcpriv;

		list_del_init(&urbp->urb_list);
		spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
		uhci_destroy_urb_priv (uhci, urb);

		return ret;
	}

	spin_unlock_irqrestore(&uhci->urb_list_lock, flags);

	return 0;
}
/*
 * Return the result of a transfer
 *
 * MUST be called with urb_list_lock acquired
 *
 * Polls the per-type result routine; if the URB is finished (any result
 * other than -EINPROGRESS) it is unlinked from the schedule, its
 * bandwidth is released where appropriate, and it is moved to the
 * complete list for later callback.
 */
static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
{
	int ret = -EINVAL;
	unsigned long flags;
	struct urb_priv *urbp;

	spin_lock_irqsave(&urb->lock, flags);

	urbp = (struct urb_priv *)urb->hcpriv;

	if (urb->status != -EINPROGRESS) {
		info("uhci_transfer_result: called for URB %p not in flight?", urb);
		goto out;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_result_control(uhci, urb);
		break;
	case PIPE_INTERRUPT:
		ret = uhci_result_interrupt(uhci, urb);
		break;
	case PIPE_BULK:
		ret = uhci_result_bulk(uhci, urb);
		break;
	case PIPE_ISOCHRONOUS:
		ret = uhci_result_isochronous(uhci, urb);
		break;
	}

	urbp->status = ret;

	if (ret == -EINPROGRESS)
		goto out;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	case PIPE_ISOCHRONOUS:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Spinlock needed ? */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		uhci_unlink_generic(uhci, urb);
		break;
	case PIPE_INTERRUPT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		spin_lock(&uhci->frame_list_lock);
		/* Spinlock needed ? */
		if (list_empty(&urbp->queue_list) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		spin_unlock(&uhci->frame_list_lock);
		uhci_unlink_generic(uhci, urb);
		break;
	default:
		info("uhci_transfer_result: unknown pipe type %d for urb %p\n",
			usb_pipetype(urb->pipe), urb);
	}

	/* Remove it from uhci->urb_list */
	list_del_init(&urbp->urb_list);

	uhci_add_complete(uhci, urb);

out:
	spin_unlock_irqrestore(&urb->lock, flags);
}
/*
 * MUST be called with urb->lock acquired
 *
 * Detach the URB from the hardware schedule: drop FSBR, recover the
 * endpoint's data toggle from the last successfully completed TD, and
 * remove the QH (queued siblings are unspliced first).
 */
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *head, *tmp;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int prevactive = 1;

	/* We can get called when urbp allocation fails, so check */
	if (!urbp)
		return;

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */

	/*
	 * Now we need to find out what the last successful toggle was
	 * so we can update the local data toggle for the next transfer
	 *
	 * There's 3 way's the last successful completed TD is found:
	 *
	 * 1) The TD is NOT active and the actual length < expected length
	 * 2) The TD is NOT active and it's the last TD in the chain
	 * 3) The TD is active and the previous TD is NOT active
	 *
	 * Control and Isochronous ignore the toggle, so this is safe
	 * for all types
	 */
	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		if (!(td_status(td) & TD_CTRL_ACTIVE) &&
		    (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td)) ||
		    tmp == head))
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)) ^ 1);
		else if ((td_status(td) & TD_CTRL_ACTIVE) && !prevactive)
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)));

		prevactive = td_status(td) & TD_CTRL_ACTIVE;
	}

	uhci_delete_queued_urb(uhci, urb);

	/* The interrupt loop will reclaim the QH's */
	uhci_remove_qh(uhci, urbp->qh);
	urbp->qh = NULL;
}
/*
 * HCD entry point: cancel a submitted URB.  The URB is unlinked from the
 * schedule and parked on urb_remove_list; the interrupt handler completes
 * the cancellation once the HC is guaranteed past it.
 *
 * Lock nesting: urb_list_lock -> urb_remove_list_lock.
 */
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp = urb->hcpriv;

	/* If this is an interrupt URB that is being killed in urb->complete, */
	/* then just set its status and return */
	if (!urbp) {
		urb->status = -ECONNRESET;
		return 0;
	}

	spin_lock_irqsave(&uhci->urb_list_lock, flags);

	list_del_init(&urbp->urb_list);

	uhci_unlink_generic(uhci, urb);

	spin_lock(&uhci->urb_remove_list_lock);

	/* If we're the first, set the next interrupt bit */
	if (list_empty(&uhci->urb_remove_list))
		uhci_set_next_interrupt(uhci);

	list_add(&urbp->urb_list, &uhci->urb_remove_list);

	spin_unlock(&uhci->urb_remove_list_lock);
	spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
	return 0;
}
/*
 * An URB has been using Full Speed Bandwidth Reclamation for too long
 * (the device keeps NAKing): drop its FSBR claim and demote its TD
 * chain to mostly depth-first traversal so it stops hogging the bus.
 */
static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct list_head *head, *tmp;
	int count = 0;

	uhci_dec_fsbr(uhci, urb);

	/* Remember so stall_callback() doesn't time this URB out again */
	urbp->fsbr_timeout = 1;

	/*
	 * Ideally we would want to fix qh->element as well, but it's
	 * read/write by the HC, so that can introduce a race. It's not
	 * really worth the hassle
	 */

	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		/*
		 * Make sure we don't do the last one (since it'll have the
		 * TERM bit set) as well as we skip every so many TD's to
		 * make sure it doesn't hog the bandwidth
		 */
		/* NOTE(review): no cpu_to_le32() around UHCI_PTR_DEPTH here,
		 * unlike other link writes in this file — harmless on
		 * little-endian hosts, but confirm if this ever runs
		 * big-endian */
		if (tmp != head && (count % DEPTH_INTERVAL) == (DEPTH_INTERVAL - 1))
			td->link |= UHCI_PTR_DEPTH;

		count++;
	}

	return 0;
}
/* |
* uhci_get_current_frame_number() |
* |
* returns the current frame number for a USB bus/controller. |
*/ |
static int uhci_get_current_frame_number(struct uhci_hcd *uhci) |
{ |
return inw(uhci->io_addr + USBFRNUM); |
} |
static int init_stall_timer(struct usb_hcd *hcd); |
/*
 * Periodic watchdog (runs every HZ/10 via uhci->stall_timer; see
 * init_stall_timer below).  Handles FSBR timeouts, URB timeouts, the
 * delayed disable of the FSBR loop, and HC power-state transitions,
 * then re-arms itself.
 *
 * Timed-out URBs are first moved to a private list under the lock, then
 * dequeued after the lock is dropped, because uhci_urb_dequeue() takes
 * urb_list_lock itself.
 */
static void stall_callback(unsigned long ptr)
{
	struct usb_hcd *hcd = (struct usb_hcd *)ptr;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	struct list_head list, *tmp, *head;
	unsigned long flags;

	INIT_LIST_HEAD(&list);

	spin_lock_irqsave(&uhci->urb_list_lock, flags);
	head = &uhci->urb_list;
	tmp = head->next;
	while (tmp != head) {
		struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
		struct urb *u = up->urb;

		tmp = tmp->next;	/* advance before the entry can be moved */

		spin_lock(&u->lock);

		/* Check if the FSBR timed out */
		if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies26, up->fsbrtime + IDLE_TIMEOUT))
			uhci_fsbr_timeout(uhci, u);

		/* Check if the URB timed out */
		if (u->timeout && time_after_eq(jiffies26, up->inserttime + u->timeout))
			list_move_tail(&up->urb_list, &list);

		spin_unlock(&u->lock);
	}
	spin_unlock_irqrestore(&uhci->urb_list_lock, flags);

	/* now dequeue the collected timed-out URBs, lock-free */
	head = &list;
	tmp = head->next;
	while (tmp != head) {
		struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
		struct urb *u = up->urb;

		tmp = tmp->next;

		uhci_urb_dequeue(hcd, u);
	}

	/* Really disable FSBR */
	if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies26, uhci->fsbrtimeout)) {
		uhci->fsbrtimeout = 0;
		uhci->skel_term_qh->link = UHCI_PTR_TERM;
	}

	/* Poll for and perform state transitions */
	hc_state_transitions(uhci);

	init_stall_timer(hcd);	/* re-arm for the next tick */
}
static int init_stall_timer(struct usb_hcd *hcd) |
{ |
struct uhci_hcd *uhci = hcd_to_uhci(hcd); |
init_timer(&uhci->stall_timer); |
uhci->stall_timer.function = stall_callback; |
uhci->stall_timer.data = (unsigned long)hcd; |
uhci->stall_timer.expires = jiffies26 + (HZ / 10); |
add_timer(&uhci->stall_timer); |
return 0; |
} |
static void uhci_free_pending_qhs(struct uhci_hcd *uhci) |
{ |
struct list_head *tmp, *head; |
unsigned long flags; |
spin_lock_irqsave(&uhci->qh_remove_list_lock, flags); |
head = &uhci->qh_remove_list; |
tmp = head->next; |
while (tmp != head) { |
struct uhci_qh *qh = list_entry(tmp, struct uhci_qh, remove_list); |
tmp = tmp->next; |
list_del_init(&qh->remove_list); |
uhci_free_qh(uhci, qh); |
} |
spin_unlock_irqrestore(&uhci->qh_remove_list_lock, flags); |
} |
static void uhci_free_pending_tds(struct uhci_hcd *uhci) |
{ |
struct list_head *tmp, *head; |
unsigned long flags; |
spin_lock_irqsave(&uhci->td_remove_list_lock, flags); |
head = &uhci->td_remove_list; |
tmp = head->next; |
while (tmp != head) { |
struct uhci_td *td = list_entry(tmp, struct uhci_td, remove_list); |
tmp = tmp->next; |
list_del_init(&td->remove_list); |
uhci_free_td(uhci, td); |
} |
spin_unlock_irqrestore(&uhci->td_remove_list_lock, flags); |
} |
/*
 * Tear down an URB's driver-private state and give it back to usbcore.
 *
 * The status recorded in urb_priv (captured under urb->lock) becomes the
 * final urb->status, except when the URB was already marked unlinked
 * (-ENOENT) or cancelled (-ECONNRESET) — those take precedence.
 */
static void uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int status;
	unsigned long flags;

	spin_lock_irqsave(&urb->lock, flags);
	status = urbp->status;
	uhci_destroy_urb_priv(uhci, urb);	/* frees urbp; urb->hcpriv gone after this */

	if (urb->status != -ENOENT && urb->status != -ECONNRESET)
		urb->status = status;
	spin_unlock_irqrestore(&urb->lock, flags);

	usb_hcd_giveback_urb(hcd, urb, regs);
}
/*
 * Give back every URB on the complete list.
 *
 * The lock is dropped around uhci_finish_urb() because the completion
 * callback may resubmit URBs (which can append to this very list); the
 * walk therefore restarts from the head after each giveback instead of
 * caching an iterator across the unlocked region.
 */
static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	struct list_head *tmp, *head;
	unsigned long flags;

	spin_lock_irqsave(&uhci->complete_list_lock, flags);
	head = &uhci->complete_list;
	tmp = head->next;
	while (tmp != head) {
		struct urb_priv *urbp = list_entry(tmp, struct urb_priv, complete_list);
		struct urb *urb = urbp->urb;

		list_del_init(&urbp->complete_list);
		spin_unlock_irqrestore(&uhci->complete_list_lock, flags);

		uhci_finish_urb(hcd, urb, regs);

		spin_lock_irqsave(&uhci->complete_list_lock, flags);
		head = &uhci->complete_list;	/* restart: list may have changed */
		tmp = head->next;
	}
	spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
}
static void uhci_remove_pending_qhs(struct uhci_hcd *uhci) |
{ |
struct list_head *tmp, *head; |
unsigned long flags; |
spin_lock_irqsave(&uhci->urb_remove_list_lock, flags); |
head = &uhci->urb_remove_list; |
tmp = head->next; |
while (tmp != head) { |
struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list); |
struct urb *urb = urbp->urb; |
tmp = tmp->next; |
list_del_init(&urbp->urb_list); |
urbp->status = urb->status = -ECONNRESET; |
uhci_add_complete(uhci, urb); |
} |
spin_unlock_irqrestore(&uhci->urb_remove_list_lock, flags); |
} |
static void uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs) |
{ |
struct uhci_hcd *uhci = hcd_to_uhci(hcd); |
unsigned int io_addr = uhci->io_addr; |
unsigned short status; |
struct list_head *tmp, *head; |
static int count =0; |
/* |
* Read the interrupt status, and write it back to clear the |
* interrupt cause |
*/ |
status = inw(io_addr + USBSTS); |
if (!status) /* shared interrupt, not mine */ |
return; |
outw(status, io_addr + USBSTS); /* Clear it */ |
// printk("%x uhci_irq\n", io_addr); |
if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) { |
if (status & USBSTS_HSE) |
{ |
err("%x: host system error, PCI problems?", io_addr); |
} |
if (status & USBSTS_HCPE) |
err("%x: host controller process error. something bad happened", io_addr); |
if ((status & USBSTS_HCH) && uhci->state > 0) { |
err("%x: host controller halted. very bad", io_addr); |
/* FIXME: Reset the controller, fix the offending TD */ |
} |
} |
if (status & USBSTS_RD) |
uhci->resume_detect = 1; |
uhci_free_pending_qhs(uhci); |
uhci_free_pending_tds(uhci); |
uhci_remove_pending_qhs(uhci); |
uhci_clear_next_interrupt(uhci); |
/* Walk the list of pending URB's to see which ones completed */ |
spin_lock(&uhci->urb_list_lock); |
head = &uhci->urb_list; |
tmp = head->next; |
while (tmp != head) { |
struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list); |
struct urb *urb = urbp->urb; |
tmp = tmp->next; |
/* Checks the status and does all of the magic necessary */ |
uhci_transfer_result(uhci, urb); |
} |
spin_unlock(&uhci->urb_list_lock); |
uhci_finish_completion(hcd, regs); |
} |
/*
 * Hard-reset the controller via global reset, holding it for 50ms and
 * then waiting a further 10ms, sleeping (not busy-waiting) in between.
 * Must be called from process context.
 */
static void reset_hc(struct uhci_hcd *uhci)
{
	unsigned int io_addr = uhci->io_addr;

	/* Global reset for 50ms */
	uhci->state = UHCI_RESET;
	outw(USBCMD_GRESET, io_addr + USBCMD);
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout((HZ*50+999) / 1000);	/* >= 50ms, rounded up to jiffies */
	outw(0, io_addr + USBCMD);

	/* Another 10ms delay */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout((HZ*10+999) / 1000);
	uhci->resume_detect = 0;	/* any prior resume signalling is now stale */
}
/*
 * Put the controller into global suspend (EGSM).  A later resume is
 * triggered either by the driver or by device-initiated wakeup, which
 * sets resume_detect from the interrupt handler.
 */
static void suspend_hc(struct uhci_hcd *uhci)
{
	unsigned int io_addr = uhci->io_addr;

	dbg("%x: suspend_hc", io_addr);
	uhci->state = UHCI_SUSPENDED;
	uhci->resume_detect = 0;
	outw(USBCMD_EGSM, io_addr + USBCMD);	/* Enter Global Suspend Mode */
}
/*
 * Advance the resume state machine one step.  Called repeatedly (from
 * hc_state_transitions via the stall timer) until the controller is
 * running again: SUSPENDED -> RESUMING_1 -> RESUMING_2 -> RUNNING_GRACE
 * -> RUNNING.
 */
static void wakeup_hc(struct uhci_hcd *uhci)
{
	unsigned int io_addr = uhci->io_addr;

	switch (uhci->state) {
		case UHCI_SUSPENDED:		/* Start the resume */
			dbg("%x: wakeup_hc", io_addr);

			/* Global resume for >= 20ms */
			outw(USBCMD_FGR | USBCMD_EGSM, io_addr + USBCMD);
			uhci->state = UHCI_RESUMING_1;
			uhci->state_end = jiffies26 + (20*HZ+999) / 1000;
			break;

		case UHCI_RESUMING_1:		/* End global resume */
			uhci->state = UHCI_RESUMING_2;
			outw(0, io_addr + USBCMD);
			/* Falls through */

		case UHCI_RESUMING_2:		/* Wait for EOP to be sent */
			if (inw(io_addr + USBCMD) & USBCMD_FGR)
				break;	/* HW hasn't finished signalling resume yet */

			/* Run for at least 1 second, and
			 * mark it configured with a 64-byte max packet */
			uhci->state = UHCI_RUNNING_GRACE;
			uhci->state_end = jiffies26 + HZ;
			outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP,
					io_addr + USBCMD);
			break;

		case UHCI_RUNNING_GRACE:	/* Now allowed to suspend */
			uhci->state = UHCI_RUNNING;
			break;

		default:
			break;
	}
}
static int ports_active(struct uhci_hcd *uhci) |
{ |
unsigned int io_addr = uhci->io_addr; |
int connection = 0; |
int i; |
for (i = 0; i < uhci->rh_numports; i++) |
connection |= (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_CCS); |
return connection; |
} |
static int suspend_allowed(struct uhci_hcd *uhci) |
{ |
unsigned int io_addr = uhci->io_addr; |
int i; |
if (!uhci->hcd.pdev || uhci->hcd.pdev->vendor != PCI_VENDOR_ID_INTEL) |
return 1; |
/* Some of Intel's USB controllers have a bug that causes false |
* resume indications if any port has an over current condition. |
* To prevent problems, we will not allow a global suspend if |
* any ports are OC. |
* |
* Some motherboards using Intel's chipsets (but not using all |
* the USB ports) appear to hardwire the over current inputs active |
* to disable the USB ports. |
*/ |
/* check for over current condition on any port */ |
for (i = 0; i < uhci->rh_numports; i++) { |
if (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_OC) |
return 0; |
} |
return 1; |
} |
/*
 * Poll-driven power state machine, run from the stall timer:
 * suspend after one idle second, resume when a device signals wakeup,
 * and step multi-phase resumes forward via wakeup_hc().
 */
static void hc_state_transitions(struct uhci_hcd *uhci)
{
	switch (uhci->state) {
		case UHCI_RUNNING:

			/* global suspend if nothing connected for 1 second */
			if (!ports_active(uhci) && suspend_allowed(uhci)) {
				uhci->state = UHCI_SUSPENDING_GRACE;
				uhci->state_end = jiffies26 + HZ;
			}
			break;

		case UHCI_SUSPENDING_GRACE:
			if (ports_active(uhci))
				uhci->state = UHCI_RUNNING;	/* device appeared: abort suspend */
			else if (time_after_eq(jiffies26, uhci->state_end))
				suspend_hc(uhci);
			break;

		case UHCI_SUSPENDED:

			/* wakeup if requested by a device */
			if (uhci->resume_detect)
				wakeup_hc(uhci);
			break;

		case UHCI_RESUMING_1:
		case UHCI_RESUMING_2:
		case UHCI_RUNNING_GRACE:
			if (time_after_eq(jiffies26, uhci->state_end))
				wakeup_hc(uhci);	/* advance the resume sequence */
			break;

		default:
			break;
	}
}
/*
 * Bring the controller out of reset and set it running: HCRESET with a
 * bounded busy-wait, enable all interrupt sources, point the HC at the
 * frame list, and start the schedule at frame 0.
 */
static void start_hc(struct uhci_hcd *uhci)
{
	unsigned int io_addr = uhci->io_addr;
	int timeout = 1000;	/* bounded spin on HCRESET self-clearing */

	/*
	 * Reset the HC - this will force us to get a
	 * new notification of any already connected
	 * ports due to the virtual disconnect that it
	 * implies.
	 */
	outw(USBCMD_HCRESET, io_addr + USBCMD);
	while (inw(io_addr + USBCMD) & USBCMD_HCRESET) {
		if (!--timeout) {
			printk(KERN_ERR "uhci: USBCMD_HCRESET timed out!\n");
			break;
		}
	}

	/* Turn on all interrupts */
	outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
		io_addr + USBINTR);

	/* Start at frame 0 */
	outw(0, io_addr + USBFRNUM);
	outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);

	/* Run and mark it configured with a 64-byte max packet */
	uhci->state = UHCI_RUNNING_GRACE;
	uhci->state_end = jiffies26 + HZ;
	outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);

	uhci->hcd.state = USB_STATE_RUNNING;

#ifdef DEB
	/* bring-up diagnostics: dump status and the first frame pointers */
	{
		__u32 *tdp;
		int i;
		int status = inw(io_addr + USBSTS);

		printk(KERN_INFO "[%x] Frame = %d Status =%x fl=%x\n", io_addr, inw(io_addr + USBFRNUM), status, uhci->fl->dma_handle);
		for (i=0; i<20; i++)
		{
			int status = inw(io_addr + USBSTS);
			wait_ms26(500);
			tdp=(__u32*)uhci->fl->frame[i];
			printk(KERN_INFO "[%x] Frame[%d] -> @%x = %x status=%x fl=%x\n", io_addr, i, uhci->fl->frame[i], *tdp, status, uhci->fl->dma_handle );
		}
	}
#endif
}
/*
 * De-allocate all resources acquired by uhci_start(): skeleton QHs, the
 * terminating TD, both pci_pools, the DMA-consistent frame list, and the
 * proc entry.  Each teardown is guarded, so this is safe to call with
 * partially-initialized state.
 */
static void release_uhci(struct uhci_hcd *uhci)
{
	int i;

	for (i = 0; i < UHCI_NUM_SKELQH; i++)
		if (uhci->skelqh[i]) {
			uhci_free_qh(uhci, uhci->skelqh[i]);
			uhci->skelqh[i] = NULL;
		}

	if (uhci->term_td) {
		uhci_free_td(uhci, uhci->term_td);
		uhci->term_td = NULL;
	}

	/* QHs/TDs must be freed before their backing pools are destroyed */
	if (uhci->qh_pool) {
		pci_pool_destroy(uhci->qh_pool);
		uhci->qh_pool = NULL;
	}

	if (uhci->td_pool) {
		pci_pool_destroy(uhci->td_pool);
		uhci->td_pool = NULL;
	}

	if (uhci->fl) {
		pci_free_consistent(uhci->hcd.pdev, sizeof(*uhci->fl), uhci->fl, uhci->fl->dma_handle);
		uhci->fl = NULL;
	}

#ifdef CONFIG_PROC_FS
	if (uhci->proc_entry) {
		remove_proc_entry(uhci->hcd.self.bus_name, uhci_proc_root);
		uhci->proc_entry = NULL;
	}
#endif
}
/*
 * hc_driver.reset: quiesce the controller before uhci_start().
 * Masks all interrupts, resets the HC, and disables any BIOS/legacy
 * (SMI) ownership of the hardware.  Returns 0 always.
 */
static int uhci_reset(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	uhci->io_addr = (unsigned long) hcd->regs;

	/* Turn off all interrupts 2.6.1 */
	outw(0, uhci->io_addr + USBINTR);

	/* Maybe kick BIOS off this hardware. Then reset, so we won't get
	 * interrupts from any previous setup.
	 */
	reset_hc(uhci);
	pci_write_config_word(hcd->pdev, USBLEGSUP, USBLEGSUP_DEFAULT);
	return 0;
}
/* |
* Allocate a frame list, and then setup the skeleton |
* |
* The hardware doesn't really know any difference |
* in the queues, but the order does matter for the |
* protocols higher up. The order is: |
* |
* - any isochronous events handled before any |
* of the queues. We don't do that here, because |
* we'll create the actual TD entries on demand. |
* - The first queue is the interrupt queue. |
* - The second queue is the control queue, split into low and high speed |
* - The third queue is bulk queue. |
* - The fourth queue is the bandwidth reclamation queue, which loops back |
* to the high speed control queue. |
*/ |
/*
 * hc_driver.start: allocate and wire up everything the controller
 * needs (proc entry, frame list, TD/QH pools, root hub, skeleton
 * schedule), then start the HC and the watchdog timer.  Unwinds with a
 * goto-cleanup chain in strict reverse order of acquisition on failure.
 */
static int uhci_start(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int retval = -EBUSY;
	int i, port;
	unsigned io_size;
	dma_addr_t dma_handle;
	struct usb_device *udev;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *ent;
#endif

	io_size = pci_resource_len(hcd->pdev, hcd->region);

#ifdef CONFIG_PROC_FS
	ent = create_proc_entry(hcd->self.bus_name, S_IFREG|S_IRUGO|S_IWUSR, uhci_proc_root);
	if (!ent) {
		err("couldn't create uhci proc entry");
		retval = -ENOMEM;
		goto err_create_proc_entry;
	}

	ent->data = uhci;
	ent->proc_fops = &uhci_proc_operations;
	ent->size = 0;
	uhci->proc_entry = ent;
#endif

	uhci->fsbr = 0;
	uhci->fsbrtimeout = 0;

	/* locks and lists must exist before the IRQ handler can run */
	spin_lock_init(&uhci->qh_remove_list_lock);
	INIT_LIST_HEAD(&uhci->qh_remove_list);

	spin_lock_init(&uhci->td_remove_list_lock);
	INIT_LIST_HEAD(&uhci->td_remove_list);

	spin_lock_init(&uhci->urb_remove_list_lock);
	INIT_LIST_HEAD(&uhci->urb_remove_list);

	spin_lock_init(&uhci->urb_list_lock);
	INIT_LIST_HEAD(&uhci->urb_list);

	spin_lock_init(&uhci->complete_list_lock);
	INIT_LIST_HEAD(&uhci->complete_list);

	spin_lock_init(&uhci->frame_list_lock);

	/* HW frame list: DMA-consistent, read by the controller every frame */
	uhci->fl = pci_alloc_consistent_usb(hcd->pdev, sizeof(*uhci->fl), &dma_handle);
	if (!uhci->fl) {
		err("unable to allocate consistent memory for frame list");
		goto err_alloc_fl;
	}

	memset((void *)uhci->fl, 0, sizeof(*uhci->fl));

	uhci->fl->dma_handle = dma_handle;

	uhci->td_pool = pci_pool_create("uhci_td", hcd->pdev,
		sizeof(struct uhci_td), 16, 0);
	if (!uhci->td_pool) {
		err("unable to create td pci_pool");
		goto err_create_td_pool;
	}

	uhci->qh_pool = pci_pool_create("uhci_qh", hcd->pdev,
		sizeof(struct uhci_qh), 16, 0);
	if (!uhci->qh_pool) {
		err("unable to create qh pci_pool");
		goto err_create_qh_pool;
	}

	/* Initialize the root hub */

	/* UHCI specs says devices must have 2 ports, but goes on to say */
	/*  they may have more but give no way to determine how many they */
	/*  have. However, according to the UHCI spec, Bit 7 is always set */
	/*  to 1. So we try to use this to our advantage */
	for (port = 0; port < (io_size - 0x10) / 2; port++) {
		unsigned int portstatus;

		portstatus = inw(uhci->io_addr + 0x10 + (port * 2));
		if (!(portstatus & 0x0080))
			break;
	}
	if (debug)
		info("detected %d ports", port);

	/* This is experimental so anything less than 2 or greater than 8 is */
	/*  something weird and we'll ignore it */
	if (port < 2 || port > 8) {
		info("port count misdetected? forcing to 2 ports");
		port = 2;
	}

	uhci->rh_numports = port;

	hcd->self.root_hub = udev = usb_alloc_dev(NULL, &hcd->self);
	if (!udev) {
		err("unable to allocate root hub");
		goto err_alloc_root_hub;
	}

	uhci->term_td = uhci_alloc_td(uhci, udev);
	if (!uhci->term_td) {
		err("unable to allocate terminating TD");
		goto err_alloc_term_td;
	}

	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
		uhci->skelqh[i] = uhci_alloc_qh(uhci, udev);
		if (!uhci->skelqh[i]) {
			err("unable to allocate QH %d", i);
			goto err_alloc_skelqh;
		}
	}

	/*
	 * 8 Interrupt queues; link int2 to int1, int4 to int2, etc
	 * then link int1 to control and control to bulk
	 */
	uhci->skel_int128_qh->link = cpu_to_le32(uhci->skel_int64_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int64_qh->link = cpu_to_le32(uhci->skel_int32_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int32_qh->link = cpu_to_le32(uhci->skel_int16_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int16_qh->link = cpu_to_le32(uhci->skel_int8_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int8_qh->link = cpu_to_le32(uhci->skel_int4_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int4_qh->link = cpu_to_le32(uhci->skel_int2_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int2_qh->link = cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_hs_control_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_hs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;

	/* This dummy TD is to work around a bug in Intel PIIX controllers */
	uhci_fill_td(uhci->term_td, 0, (UHCI_NULL_DATA_SIZE << 21) |
		(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
	uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);	/* loops on itself */

	uhci->skel_term_qh->link = UHCI_PTR_TERM;
	uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);

	/*
	 * Fill the frame list: make all entries point to
	 * the proper interrupt queue.
	 *
	 * This is probably silly, but it's a simple way to
	 * scatter the interrupt queues in a way that gives
	 * us a reasonable dynamic range for irq latencies.
	 */
	for (i = 0; i < UHCI_NUMFRAMES; i++) {
		/* irq ends up as the count of consecutive set low-order bits of i */
		int irq = 0;

		if (i & 1) {
			irq++;
			if (i & 2) {
				irq++;
				if (i & 4) {
					irq++;
					if (i & 8) {
						irq++;
						if (i & 16) {
							irq++;
							if (i & 32) {
								irq++;
								if (i & 64)
									irq++;
							}
						}
					}
				}
			}
		}

		/* Only place we don't use the frame list routines */
		uhci->fl->frame[i] = cpu_to_le32(uhci->skelqh[7 - irq]->dma_handle);
	}

	start_hc(uhci);

	init_stall_timer(hcd);

	udev->speed = USB_SPEED_FULL;

	if (usb_register_root_hub(udev, &hcd->pdev->dev) != 0) {
		err("unable to start root hub");
		retval = -ENOMEM;
		goto err_start_root_hub;
	}

	return 0;

/*
 * error exits:
 */
err_start_root_hub:
	reset_hc(uhci);

	del_timer_sync(&uhci->stall_timer);

err_alloc_skelqh:
	for (i = 0; i < UHCI_NUM_SKELQH; i++)
		if (uhci->skelqh[i]) {
			uhci_free_qh(uhci, uhci->skelqh[i]);
			uhci->skelqh[i] = NULL;
		}

	uhci_free_td(uhci, uhci->term_td);
	uhci->term_td = NULL;

err_alloc_term_td:
	usb_put_dev(udev);
	hcd->self.root_hub = NULL;

err_alloc_root_hub:
	pci_pool_destroy(uhci->qh_pool);
	uhci->qh_pool = NULL;

err_create_qh_pool:
	pci_pool_destroy(uhci->td_pool);
	uhci->td_pool = NULL;

err_create_td_pool:
	pci_free_consistent(hcd->pdev, sizeof(*uhci->fl), uhci->fl, uhci->fl->dma_handle);
	uhci->fl = NULL;

err_alloc_fl:
#ifdef CONFIG_PROC_FS
	remove_proc_entry(hcd->self.bus_name, uhci_proc_root);
	uhci->proc_entry = NULL;

err_create_proc_entry:
#endif

	return retval;
}
/*
 * hc_driver.stop: shut the controller down and free everything.
 * The pending-work drains run both before and after reset_hc() —
 * the removal pass itself can queue new QHs/TDs for freeing.
 */
static void uhci_stop(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	del_timer_sync(&uhci->stall_timer);

	/*
	 * At this point, we're guaranteed that no new connects can be made
	 * to this bus since there are no more parents
	 */
	uhci_free_pending_qhs(uhci);
	uhci_free_pending_tds(uhci);
	uhci_remove_pending_qhs(uhci);

	reset_hc(uhci);

	/* second pass: reap anything queued by the removal above */
	uhci_free_pending_qhs(uhci);
	uhci_free_pending_tds(uhci);

	release_uhci(uhci);
}
#ifdef CONFIG_PM |
/*
 * hc_driver.suspend: park the controller for power management.
 * Boards hit by the Intel over-current erratum can't suspend safely,
 * so they get a full reset instead.  Returns 0 always.
 */
static int uhci_suspend(struct usb_hcd *hcd, u32 state)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	/* Don't try to suspend broken motherboards, reset instead */
	if (!suspend_allowed(uhci))
		reset_hc(uhci);
	else
		suspend_hc(uhci);

	return 0;
}
static int uhci_resume(struct usb_hcd *hcd) |
{ |
struct uhci_hcd *uhci = hcd_to_uhci(hcd); |
pci_set_master(uhci->hcd.pdev); |
if (uhci->state == UHCI_SUSPENDED) |
uhci->resume_detect = 1; |
else { |
reset_hc(uhci); |
start_hc(uhci); |
} |
uhci->hcd.state = USB_STATE_RUNNING; |
return 0; |
} |
#endif |
static struct usb_hcd *uhci_hcd_alloc(void) |
{ |
struct uhci_hcd *uhci; |
uhci = (struct uhci_hcd *)kmalloc(sizeof(*uhci), GFP_KERNEL); |
if (!uhci) |
return NULL; |
memset(uhci, 0, sizeof(*uhci)); |
uhci->hcd.product_desc = "UHCI Host Controller"; |
return &uhci->hcd; |
} |
/* Release a uhci_hcd allocated by uhci_hcd_alloc(). */
static void uhci_hcd_free(struct usb_hcd *hcd)
{
	kfree(hcd_to_uhci(hcd));
}
/* hc_driver.get_frame_number: thin wrapper over the USBFRNUM read. */
static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
{
	return uhci_get_current_frame_number(hcd_to_uhci(hcd));
}
static const char hcd_name[] = "uhci_hcd";

/* hc_driver operations table handed to usbcore for every UHCI device */
static const struct hc_driver uhci_driver = {
	.description =		hcd_name,

	/* Generic hardware linkage */
	.irq =			uhci_irq,
	.flags =		HCD_USB11,

	/* Basic lifecycle operations */
	.reset =		uhci_reset,
	.start =		uhci_start,
#ifdef CONFIG_PM
	.suspend =		uhci_suspend,
	.resume =		uhci_resume,
#endif
	.stop =			uhci_stop,

	.hcd_alloc =		uhci_hcd_alloc,
	.hcd_free =		uhci_hcd_free,

	.urb_enqueue =		uhci_urb_enqueue,
	.urb_dequeue =		uhci_urb_dequeue,

	.get_frame_number =	uhci_hcd_get_frame_number,

	.hub_status_data =	uhci_hub_status_data,
	.hub_control =		uhci_hub_control,
};
/* Match any PCI device whose class is serial-bus/USB with UHCI interface */
static const struct pci_device_id uhci_pci_ids[] = { {
	/* handle any USB UHCI controller */
	PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x00), ~0),
	.driver_data =	(unsigned long) &uhci_driver,
	}, { /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
/* PCI glue: probe/remove/PM are delegated to the generic usb_hcd_pci layer */
static struct pci_driver uhci_pci_driver = {
	.name =		(char *)hcd_name,
	.id_table =	uhci_pci_ids,

	.probe =	usb_hcd_pci_probe,
	.remove =	usb_hcd_pci_remove,

#ifdef	CONFIG_PM
	.suspend =	usb_hcd_pci_suspend,
	.resume =	usb_hcd_pci_resume,
#endif	/* PM */
};
/*
 * Module init: allocate the debug buffer (when enabled), create the
 * procfs root, and register the PCI driver; unwinds in reverse order
 * on failure.
 *
 * Fix: dropped the unused "up_failed:" label (its only goto lived in
 * commented-out code, so it generated an unused-label warning) along
 * with that dead commented-out kmem_cache block.
 */
/*static*/ int __init uhci_hcd_init(void)
{
	int retval = -ENOMEM;

	info(DRIVER_DESC " " DRIVER_VERSION);

	if (usb_disabled())
		return -ENODEV;

	if (debug) {
		errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
		if (!errbuf)
			goto errbuf_failed;
	}

#ifdef CONFIG_PROC_FS
	uhci_proc_root = create_proc_entry("driver/uhci", S_IFDIR, 0);
	if (!uhci_proc_root)
		goto proc_failed;
#endif

	/* NOTE: the stock driver creates a kmem_cache for urb_priv here;
	 * this port allocates urb_priv's without a dedicated cache. */

	retval = pci_module_init(&uhci_pci_driver);
	if (retval)
		goto init_failed;

	return 0;

init_failed:
#ifdef CONFIG_PROC_FS
	remove_proc_entry("driver/uhci", 0);

proc_failed:
#endif
	if (errbuf)
		kfree(errbuf);

errbuf_failed:

	return retval;
}
/*
 * Module exit: mirror of uhci_hcd_init() — unregister the PCI driver,
 * remove the procfs root, and free the debug buffer (if allocated).
 * The commented block marks where the stock driver destroys its
 * urb_priv kmem_cache, which this port does not create.
 */
/*static*/ void __exit uhci_hcd_cleanup(void)
{
	pci_unregister_driver(&uhci_pci_driver);

//**	if (kmem_cache_destroy(uhci_up_cachep))
//**		printk(KERN_INFO "uhci: not all urb_priv's were freed\n");

#ifdef CONFIG_PROC_FS
	remove_proc_entry("driver/uhci", 0);
#endif

	if (errbuf)
		kfree(errbuf);
}
module_init(uhci_hcd_init); |
module_exit(uhci_hcd_cleanup); |
MODULE_AUTHOR(DRIVER_AUTHOR); |
MODULE_DESCRIPTION(DRIVER_DESC); |
MODULE_LICENSE("GPL"); |
/shark/trunk/drivers/usb/host/ehci-mem.c |
---|
1,249 → 1,249 |
/* |
* Copyright (c) 2001 by David Brownell |
* |
* This program is free software; you can redistribute it and/or modify it |
* under the terms of the GNU General Public License as published by the |
* Free Software Foundation; either version 2 of the License, or (at your |
* option) any later version. |
* |
* This program is distributed in the hope that it will be useful, but |
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY |
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
* for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software Foundation, |
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
*/ |
/* this file is part of ehci-hcd.c */ |
/*-------------------------------------------------------------------------*/ |
/* |
* There's basically three types of memory: |
* - data used only by the HCD ... kmalloc is fine |
* - async and periodic schedules, shared by HC and HCD ... these |
* need to use pci_pool or pci_alloc_consistent |
* - driver buffers, read/written by HC ... single shot DMA mapped |
* |
* There's also PCI "register" data, which is memory mapped. |
* No memory seen by this driver is pagable. |
*/ |
/*-------------------------------------------------------------------------*/ |
/* |
* Allocator / cleanup for the per device structure |
* Called by hcd init / removal code |
*/ |
static struct usb_hcd *ehci_hcd_alloc (void) |
{ |
struct ehci_hcd *ehci; |
ehci = (struct ehci_hcd *) |
kmalloc (sizeof (struct ehci_hcd), GFP_KERNEL); |
if (ehci != 0) { |
memset (ehci, 0, sizeof (struct ehci_hcd)); |
ehci->hcd.product_desc = "EHCI Host Controller"; |
return &ehci->hcd; |
} |
return 0; |
} |
/* Release an ehci_hcd allocated by ehci_hcd_alloc(). */
static void ehci_hcd_free (struct usb_hcd *hcd)
{
	kfree (hcd_to_ehci (hcd));
}
/*-------------------------------------------------------------------------*/ |
/* Allocate the key transfer structures from the previously allocated pool */ |
/*
 * Initialize a freshly pool-allocated qTD: halted (so the HC ignores it
 * until activated), with both next pointers terminated.
 */
static inline void ehci_qtd_init (struct ehci_qtd *qtd, dma_addr_t dma)
{
	memset (qtd, 0, sizeof *qtd);
	qtd->qtd_dma = dma;
	qtd->hw_token = cpu_to_le32 (QTD_STS_HALT);
	qtd->hw_next = EHCI_LIST_END;
	qtd->hw_alt_next = EHCI_LIST_END;
	INIT_LIST_HEAD (&qtd->qtd_list);
}
static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, int flags) |
{ |
struct ehci_qtd *qtd; |
dma_addr_t dma; |
qtd = pci_pool_alloc (ehci->qtd_pool, flags, &dma); |
if (qtd != 0) { |
ehci_qtd_init (qtd, dma); |
} |
return qtd; |
} |
/* Return a qTD to its DMA pool. */
static inline void ehci_qtd_free (struct ehci_hcd *ehci, struct ehci_qtd *qtd)
{
	pci_pool_free (ehci->qtd_pool, qtd, qtd->qtd_dma);
}
static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, int flags) |
{ |
struct ehci_qh *qh; |
dma_addr_t dma; |
qh = (struct ehci_qh *) |
pci_pool_alloc (ehci->qh_pool, flags, &dma); |
if (!qh) |
return qh; |
memset (qh, 0, sizeof *qh); |
atomic_set (&qh->refcount, 1); |
qh->qh_dma = dma; |
// INIT_LIST_HEAD (&qh->qh_list); |
INIT_LIST_HEAD (&qh->qtd_list); |
/* dummy td enables safe urb queuing */ |
qh->dummy = ehci_qtd_alloc (ehci, flags); |
if (qh->dummy == 0) { |
ehci_dbg (ehci, "no dummy td\n"); |
pci_pool_free (ehci->qh_pool, qh, qh->qh_dma); |
qh = 0; |
} |
return qh; |
} |
/* to share a qh (cpu threads, or hc): take a reference, return the qh */
static inline struct ehci_qh *qh_get (/* ehci, */ struct ehci_qh *qh)
{
	atomic_inc (&qh->refcount);
	return qh;
}
/*
 * Drop a QH reference; on the last put, sanity-check that the QH is
 * idle (no queued qtds, not linked into a schedule) and free it along
 * with its dummy qTD.
 */
static void qh_put (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	if (!atomic_dec_and_test (&qh->refcount))
		return;

	/* clean qtds first, and know this is not linked */
	if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
		ehci_dbg (ehci, "unused qh not empty!\n");
		BUG ();
	}

	if (qh->dummy)
		ehci_qtd_free (ehci, qh->dummy);
	pci_pool_free (ehci->qh_pool, qh, qh->qh_dma);
}
/*-------------------------------------------------------------------------*/ |
/* The queue heads and transfer descriptors are managed from pools tied |
* to each of the "per device" structures. |
* This is the initialisation and cleanup code. |
*/ |
static void ehci_mem_cleanup (struct ehci_hcd *ehci) |
{ |
if (ehci->async) |
qh_put (ehci, ehci->async); |
ehci->async = 0; |
/* PCI consistent memory and pools */ |
if (ehci->qtd_pool) |
pci_pool_destroy (ehci->qtd_pool); |
ehci->qtd_pool = 0; |
if (ehci->qh_pool) { |
pci_pool_destroy (ehci->qh_pool); |
ehci->qh_pool = 0; |
} |
if (ehci->itd_pool) |
pci_pool_destroy (ehci->itd_pool); |
ehci->itd_pool = 0; |
if (ehci->sitd_pool) |
pci_pool_destroy (ehci->sitd_pool); |
ehci->sitd_pool = 0; |
if (ehci->periodic) |
pci_free_consistent (ehci->hcd.pdev, |
ehci->periodic_size * sizeof (u32), |
ehci->periodic, ehci->periodic_dma); |
ehci->periodic = 0; |
/* shadow periodic table */ |
if (ehci->pshadow) |
kfree (ehci->pshadow); |
ehci->pshadow = 0; |
} |
/* remember to add cleanup code (above) if you add anything here */

/*
 * Create all shared HC/HCD memory: the four DMA pools (qtd/qh/itd/sitd,
 * 32-byte aligned, never crossing a 4K page), the async-list head QH,
 * the hardware periodic table, and its software shadow.  On any failure
 * the partial state is torn down via ehci_mem_cleanup() and -ENOMEM is
 * returned.
 */
static int ehci_mem_init (struct ehci_hcd *ehci, int flags)
{
	int i;

	/* QTDs for control/bulk/intr transfers */
	ehci->qtd_pool = pci_pool_create ("ehci_qtd", ehci->hcd.pdev,
			sizeof (struct ehci_qtd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qtd_pool) {
		goto fail;
	}

	/* QHs for control/bulk/intr transfers */
	ehci->qh_pool = pci_pool_create ("ehci_qh", ehci->hcd.pdev,
			sizeof (struct ehci_qh),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qh_pool) {
		goto fail;
	}
	ehci->async = ehci_qh_alloc (ehci, flags);
	if (!ehci->async) {
		goto fail;
	}

	/* ITD for high speed ISO transfers */
	ehci->itd_pool = pci_pool_create ("ehci_itd", ehci->hcd.pdev,
			sizeof (struct ehci_itd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->itd_pool) {
		goto fail;
	}

	/* SITD for full/low speed split ISO transfers */
	ehci->sitd_pool = pci_pool_create ("ehci_sitd", ehci->hcd.pdev,
			sizeof (struct ehci_sitd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->sitd_pool) {
		goto fail;
	}

	/* Hardware periodic table */
	ehci->periodic = (u32 *)
		pci_alloc_consistent (ehci->hcd.pdev,
			ehci->periodic_size * sizeof (u32),
			&ehci->periodic_dma);
	if (ehci->periodic == 0) {
		goto fail;
	}
	/* all slots empty until the schedule is populated */
	for (i = 0; i < ehci->periodic_size; i++)
		ehci->periodic [i] = EHCI_LIST_END;

	/* software shadow of hardware table */
	ehci->pshadow = kmalloc (ehci->periodic_size * sizeof (void *), flags);
	if (ehci->pshadow == 0) {
		goto fail;
	}
	memset (ehci->pshadow, 0, ehci->periodic_size * sizeof (void *));

	return 0;

fail:
	ehci_dbg (ehci, "couldn't init memory\n");
	ehci_mem_cleanup (ehci);
	return -ENOMEM;
}
/* |
* Copyright (c) 2001 by David Brownell |
* |
* This program is free software; you can redistribute it and/or modify it |
* under the terms of the GNU General Public License as published by the |
* Free Software Foundation; either version 2 of the License, or (at your |
* option) any later version. |
* |
* This program is distributed in the hope that it will be useful, but |
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY |
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
* for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software Foundation, |
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
*/ |
/* this file is part of ehci-hcd.c */ |
/*-------------------------------------------------------------------------*/ |
/* |
* There's basically three types of memory: |
* - data used only by the HCD ... kmalloc is fine |
* - async and periodic schedules, shared by HC and HCD ... these |
* need to use pci_pool or pci_alloc_consistent_usb |
* - driver buffers, read/written by HC ... single shot DMA mapped |
* |
* There's also PCI "register" data, which is memory mapped. |
* No memory seen by this driver is pagable. |
*/ |
/*-------------------------------------------------------------------------*/ |
/* |
* Allocator / cleanup for the per device structure |
* Called by hcd init / removal code |
*/ |
/*
 * Allocate and zero an ehci_hcd; returns the embedded generic usb_hcd,
 * or 0 on allocation failure.  (Second revision of ehci-mem.c in this
 * concatenated file.)
 */
static struct usb_hcd *ehci_hcd_alloc (void)
{
	struct ehci_hcd *ehci;

	ehci = (struct ehci_hcd *)
		kmalloc (sizeof (struct ehci_hcd), GFP_KERNEL);
	if (ehci != 0) {
		memset (ehci, 0, sizeof (struct ehci_hcd));
		ehci->hcd.product_desc = "EHCI Host Controller";
		return &ehci->hcd;
	}
	return 0;
}
/* Release an ehci_hcd allocated by ehci_hcd_alloc(). */
static void ehci_hcd_free (struct usb_hcd *hcd)
{
	kfree (hcd_to_ehci (hcd));
}
/*-------------------------------------------------------------------------*/ |
/* Allocate the key transfer structures from the previously allocated pool */ |
/*
 * ehci_qtd_init - prepare a freshly allocated qTD
 * @qtd: cpu-visible transfer descriptor
 * @dma: bus address of the same descriptor (what the HC sees)
 *
 * Clears the descriptor first, then records its DMA address, marks the
 * token HALTed (so the HC ignores it until it is activated), terminates
 * both hardware link pointers, and initializes the software list head.
 */
static inline void ehci_qtd_init (struct ehci_qtd *qtd, dma_addr_t dma)
{
memset (qtd, 0, sizeof *qtd);
qtd->qtd_dma = dma;
/* HALT keeps the HC from processing this qTD prematurely */
qtd->hw_token = cpu_to_le32 (QTD_STS_HALT);
qtd->hw_next = EHCI_LIST_END;
qtd->hw_alt_next = EHCI_LIST_END;
INIT_LIST_HEAD (&qtd->qtd_list);
}
static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, int flags) |
{ |
struct ehci_qtd *qtd; |
dma_addr_t dma; |
qtd = pci_pool_alloc_usb (ehci->qtd_pool, flags, &dma); |
if (qtd != 0) { |
ehci_qtd_init (qtd, dma); |
} |
return qtd; |
} |
/* Return a qTD to the pool it was allocated from. */
static inline void ehci_qtd_free (struct ehci_hcd *ehci, struct ehci_qtd *qtd)
{
pci_pool_free (ehci->qtd_pool, qtd, qtd->qtd_dma);
}
static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, int flags) |
{ |
struct ehci_qh *qh; |
dma_addr_t dma; |
qh = (struct ehci_qh *) |
pci_pool_alloc_usb (ehci->qh_pool, flags, &dma); |
if (!qh) |
return qh; |
memset (qh, 0, sizeof *qh); |
atomic_set (&qh->refcount, 1); |
qh->qh_dma = dma; |
// INIT_LIST_HEAD (&qh->qh_list); |
INIT_LIST_HEAD (&qh->qtd_list); |
/* dummy td enables safe urb queuing */ |
qh->dummy = ehci_qtd_alloc (ehci, flags); |
if (qh->dummy == 0) { |
ehci_dbg (ehci, "no dummy td\n"); |
pci_pool_free (ehci->qh_pool, qh, qh->qh_dma); |
qh = 0; |
} |
return qh; |
} |
/* to share a qh (cpu threads, or hc) */
/* Take an extra reference on a QH; paired with qh_put(). */
static inline struct ehci_qh *qh_get (/* ehci, */ struct ehci_qh *qh)
{
atomic_inc (&qh->refcount);
return qh;
}
/*
 * qh_put - drop one reference to a queue head
 *
 * On the last reference the QH must already be unlinked and have no
 * queued qTDs (anything else is a driver bug and trips BUG()); then
 * the dummy qTD and the QH itself go back to their pools.
 */
static void qh_put (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
if (!atomic_dec_and_test (&qh->refcount))
return;
/* clean qtds first, and know this is not linked */
if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
ehci_dbg (ehci, "unused qh not empty!\n");
BUG ();
}
if (qh->dummy)
ehci_qtd_free (ehci, qh->dummy);
pci_pool_free (ehci->qh_pool, qh, qh->qh_dma);
}
/*-------------------------------------------------------------------------*/ |
/* The queue heads and transfer descriptors are managed from pools tied |
* to each of the "per device" structures. |
* This is the initialisation and cleanup code. |
*/ |
static void ehci_mem_cleanup (struct ehci_hcd *ehci) |
{ |
if (ehci->async) |
qh_put (ehci, ehci->async); |
ehci->async = 0; |
/* PCI consistent memory and pools */ |
if (ehci->qtd_pool) |
pci_pool_destroy (ehci->qtd_pool); |
ehci->qtd_pool = 0; |
if (ehci->qh_pool) { |
pci_pool_destroy (ehci->qh_pool); |
ehci->qh_pool = 0; |
} |
if (ehci->itd_pool) |
pci_pool_destroy (ehci->itd_pool); |
ehci->itd_pool = 0; |
if (ehci->sitd_pool) |
pci_pool_destroy (ehci->sitd_pool); |
ehci->sitd_pool = 0; |
if (ehci->periodic) |
pci_free_consistent (ehci->hcd.pdev, |
ehci->periodic_size * sizeof (u32), |
ehci->periodic, ehci->periodic_dma); |
ehci->periodic = 0; |
/* shadow periodic table */ |
if (ehci->pshadow) |
kfree (ehci->pshadow); |
ehci->pshadow = 0; |
} |
/* remember to add cleanup code (above) if you add anything here */
/*
 * ehci_mem_init - allocate memory shared with the host controller
 * @ehci:  controller state (hcd.pdev must already be valid)
 * @flags: GFP_* flags for the kmalloc'd shadow table
 *
 * Creates the qTD/QH/iTD/siTD pci_pools (32-byte aligned, kept inside
 * a 4K page as the hardware requires), the async-list head QH, the
 * DMA-consistent hardware periodic frame table, and its software
 * shadow.  On any failure everything already allocated is torn down
 * via ehci_mem_cleanup() and -ENOMEM is returned.
 */
static int ehci_mem_init (struct ehci_hcd *ehci, int flags)
{
int i;
/* QTDs for control/bulk/intr transfers */
ehci->qtd_pool = pci_pool_create ("ehci_qtd", ehci->hcd.pdev,
sizeof (struct ehci_qtd),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
if (!ehci->qtd_pool) {
goto fail;
}
/* QHs for control/bulk/intr transfers */
ehci->qh_pool = pci_pool_create ("ehci_qh", ehci->hcd.pdev,
sizeof (struct ehci_qh),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
if (!ehci->qh_pool) {
goto fail;
}
/* head of the asynchronous schedule */
ehci->async = ehci_qh_alloc (ehci, flags);
if (!ehci->async) {
goto fail;
}
/* ITD for high speed ISO transfers */
ehci->itd_pool = pci_pool_create ("ehci_itd", ehci->hcd.pdev,
sizeof (struct ehci_itd),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
if (!ehci->itd_pool) {
goto fail;
}
/* SITD for full/low speed split ISO transfers */
ehci->sitd_pool = pci_pool_create ("ehci_sitd", ehci->hcd.pdev,
sizeof (struct ehci_sitd),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
if (!ehci->sitd_pool) {
goto fail;
}
/* Hardware periodic table */
ehci->periodic = (u32 *)
pci_alloc_consistent_usb (ehci->hcd.pdev,
ehci->periodic_size * sizeof (u32),
&ehci->periodic_dma);
if (ehci->periodic == 0) {
goto fail;
}
/* every slot starts empty (terminated) */
for (i = 0; i < ehci->periodic_size; i++)
ehci->periodic [i] = EHCI_LIST_END;
/* software shadow of hardware table */
ehci->pshadow = kmalloc (ehci->periodic_size * sizeof (void *), flags);
if (ehci->pshadow == 0) {
goto fail;
}
memset (ehci->pshadow, 0, ehci->periodic_size * sizeof (void *));
return 0;
fail:
ehci_dbg (ehci, "couldn't init memory\n");
ehci_mem_cleanup (ehci);
return -ENOMEM;
}
/shark/trunk/drivers/usb/host/ohci-pci.c |
---|
1,396 → 1,396 |
/* |
* OHCI HCD (Host Controller Driver) for USB. |
* |
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at> |
* (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net> |
* |
* [ Initialisation is based on Linus' ] |
* [ uhci code and gregs ohci fragments ] |
* [ (C) Copyright 1999 Linus Torvalds ] |
* [ (C) Copyright 1999 Gregory P. Smith] |
* |
* PCI Bus Glue |
* |
* This file is licenced under the GPL. |
*/ |
#ifdef CONFIG_PMAC_PBOOK |
#include <asm/machdep.h> |
#include <asm/pmac_feature.h> |
#include <asm/pci-bridge.h> |
#include <asm/prom.h> |
#ifndef CONFIG_PM |
# define CONFIG_PM |
#endif |
#endif |
#ifndef CONFIG_PCI |
#error "This file is PCI bus glue. CONFIG_PCI must be defined." |
#endif |
/*-------------------------------------------------------------------------*/ |
/*
 * ohci_pci_reset - HCD "reset" entry point: remember the mapped
 * register window and bring the controller to a clean reset state.
 */
static int
ohci_pci_reset (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
ohci->regs = hcd->regs;
return hc_reset (ohci);
}
/*
 * ohci_pci_start - HCD "start" entry point for the PCI glue
 *
 * Allocates the DMA-consistent HCCA, applies per-vendor quirks
 * (AMD 756 register-read corruption, OPTi warning, NSC 87560 SuperIO
 * detection via the fn1 bridge), initializes the ED/TD pools, and
 * starts the controller.  Returns 0, -ENOMEM, -EBUSY, or the
 * ohci_mem_init() error; on failure the controller is stopped.
 */
static int __devinit
ohci_pci_start (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
int ret;
if (hcd->pdev) {
ohci->hcca = pci_alloc_consistent (hcd->pdev,
sizeof *ohci->hcca, &ohci->hcca_dma);
if (!ohci->hcca)
return -ENOMEM;
/* AMD 756, for most chips (early revs), corrupts register
 * values on read ... so enable the vendor workaround.
 */
if (hcd->pdev->vendor == PCI_VENDOR_ID_AMD
&& hcd->pdev->device == 0x740c) {
ohci->flags = OHCI_QUIRK_AMD756;
ohci_info (ohci, "AMD756 erratum 4 workaround\n");
}
/* FIXME for some of the early AMD 760 southbridges, OHCI
 * won't work at all. blacklist them.
 */
/* Apple's OHCI driver has a lot of bizarre workarounds
 * for this chip. Evidently control and bulk lists
 * can get confused. (B&W G3 models, and ...)
 */
else if (hcd->pdev->vendor == PCI_VENDOR_ID_OPTI
&& hcd->pdev->device == 0xc861) {
ohci_info (ohci,
"WARNING: OPTi workarounds unavailable\n");
}
/* Check for NSC87560. We have to look at the bridge (fn1) to
 * identify the USB (fn2). This quirk might apply to more or
 * even all NSC stuff.
 */
else if (hcd->pdev->vendor == PCI_VENDOR_ID_NS) {
struct pci_dev *b, *hc;
hc = hcd->pdev;
b = pci_find_slot (hc->bus->number,
PCI_DEVFN (PCI_SLOT (hc->devfn), 1));
if (b && b->device == PCI_DEVICE_ID_NS_87560_LIO
&& b->vendor == PCI_VENDOR_ID_NS) {
ohci->flags |= OHCI_QUIRK_SUPERIO;
ohci_info (ohci, "Using NSC SuperIO setup\n");
}
}
}
/* NOTE(review): this memset sits outside the hcd->pdev guard above;
 * if pdev were ever NULL here, ohci->hcca would be unset and this
 * would dereference NULL.  The PCI glue presumably guarantees a
 * valid pdev -- confirm before reusing this path elsewhere.
 */
memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
if ((ret = ohci_mem_init (ohci)) < 0) {
ohci_stop (hcd);
return ret;
}
if (hc_start (ohci) < 0) {
ohci_err (ohci, "can't start\n");
ohci_stop (hcd);
return -EBUSY;
}
create_debug_files (ohci);
#ifdef DEBUG
ohci_dump (ohci, 1);
#endif
return 0;
}
#ifdef CONFIG_PM |
/*
 * ohci_pci_suspend - move an operational controller to USB SUSPEND
 * @hcd:   generic HCD state for this controller
 * @state: requested PM state (only logged)
 *
 * Stops ED list processing, waits out the current frame, enables the
 * remote-wakeup (resume-detect) interrupt, sets HCFS to USB_SUSPEND
 * and checks that it stuck.  Bus mastering is disabled afterwards
 * because some chips (Apple OHCI) have trashed memory by DMA'ing
 * during sleep.  Returns 0, or -EIO when not operational.  Sleeps.
 */
static int ohci_pci_suspend (struct usb_hcd *hcd, u32 state)
{
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
	u16			cmd;
	u32			tmp;

	if ((ohci->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_OPER) {
		ohci_dbg (ohci, "can't suspend (state is %s)\n",
			hcfs2string (ohci->hc_control & OHCI_CTRL_HCFS));
		return -EIO;
	}

	/* act as if usb suspend can always be used */
	ohci_dbg (ohci, "suspend to %d\n", state);

	/* First stop processing */
	spin_lock_irq (&ohci->lock);
	ohci->hc_control &=
		~(OHCI_CTRL_PLE|OHCI_CTRL_CLE|OHCI_CTRL_BLE|OHCI_CTRL_IE);
	writel (ohci->hc_control, &ohci->regs->control);
	writel (OHCI_INTR_SF, &ohci->regs->intrstatus);
	(void) readl (&ohci->regs->intrstatus);
	spin_unlock_irq (&ohci->lock);

	/* Wait a frame or two */
	mdelay (1);
	/* FIX: was "!readl(...) & OHCI_INTR_SF" -- '!' binds before
	 * '&', so the start-of-frame bit was never actually tested
	 * and the extra frame delay could never happen.
	 */
	if (!(readl (&ohci->regs->intrstatus) & OHCI_INTR_SF))
		mdelay (1);

#ifdef CONFIG_PMAC_PBOOK
	if (_machine == _MACH_Pmac)
		disable_irq (hcd->pdev->irq);
	/* else, 2.4 assumes shared irqs -- don't disable */
#endif

	/* Enable remote wakeup */
	writel (readl (&ohci->regs->intrenable) | OHCI_INTR_RD,
		&ohci->regs->intrenable);

	/* Suspend chip and let things settle down a bit */
	spin_lock_irq (&ohci->lock);
	ohci->hc_control = OHCI_USB_SUSPEND;
	writel (ohci->hc_control, &ohci->regs->control);
	(void) readl (&ohci->regs->control);
	spin_unlock_irq (&ohci->lock);

	set_current_state (TASK_UNINTERRUPTIBLE);
	schedule_timeout (HZ/2);

	/* FIX: was "| OHCI_CTRL_HCFS", which forced the HCFS bits on
	 * instead of extracting the field; mask with '&' so the switch
	 * below actually reflects the controller's functional state.
	 */
	tmp = readl (&ohci->regs->control) & OHCI_CTRL_HCFS;
	switch (tmp) {
	case OHCI_USB_RESET:
	case OHCI_USB_RESUME:
	case OHCI_USB_OPER:
		ohci_err (ohci, "can't suspend; hcfs %d\n", tmp);
		break;
	case OHCI_USB_SUSPEND:
		ohci_dbg (ohci, "suspended\n");
		break;
	}

	/* In some rare situations, Apple's OHCI have happily trashed
	 * memory during sleep. We disable its bus master bit during
	 * suspend
	 */
	pci_read_config_word (hcd->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word (hcd->pdev, PCI_COMMAND, cmd);
#ifdef CONFIG_PMAC_PBOOK
	{
		struct device_node *of_node;

		/* Disable USB PAD & cell clock */
		of_node = pci_device_to_OF_node (hcd->pdev);
		if (of_node)
			pmac_call_feature(PMAC_FTR_USB_ENABLE, of_node, 0, 0);
	}
#endif
	return 0;
}
static int ohci_pci_resume (struct usb_hcd *hcd) |
{ |
struct ohci_hcd *ohci = hcd_to_ohci (hcd); |
int temp; |
int retval = 0; |
#ifdef CONFIG_PMAC_PBOOK |
{ |
struct device_node *of_node; |
/* Re-enable USB PAD & cell clock */ |
of_node = pci_device_to_OF_node (hcd->pdev); |
if (of_node) |
pmac_call_feature (PMAC_FTR_USB_ENABLE, of_node, 0, 1); |
} |
#endif |
/* did we suspend, or were we powered off? */ |
ohci->hc_control = readl (&ohci->regs->control); |
temp = ohci->hc_control & OHCI_CTRL_HCFS; |
#ifdef DEBUG |
/* the registers may look crazy here */ |
ohci_dump_status (ohci, 0, 0); |
#endif |
/* Re-enable bus mastering */ |
pci_set_master (ohci->hcd.pdev); |
switch (temp) { |
case OHCI_USB_RESET: // lost power |
restart: |
ohci_info (ohci, "USB restart\n"); |
retval = hc_restart (ohci); |
break; |
case OHCI_USB_SUSPEND: // host wakeup |
case OHCI_USB_RESUME: // remote wakeup |
ohci_info (ohci, "USB continue from %s wakeup\n", |
(temp == OHCI_USB_SUSPEND) |
? "host" : "remote"); |
/* we "should" only need RESUME if we're SUSPENDed ... */ |
ohci->hc_control = OHCI_USB_RESUME; |
writel (ohci->hc_control, &ohci->regs->control); |
(void) readl (&ohci->regs->control); |
/* Some controllers (lucent) need extra-long delays */ |
mdelay (35); /* no schedule here ! */ |
temp = readl (&ohci->regs->control); |
temp = ohci->hc_control & OHCI_CTRL_HCFS; |
if (temp != OHCI_USB_RESUME) { |
ohci_err (ohci, "controller won't resume\n"); |
/* maybe we can reset */ |
goto restart; |
} |
/* Then re-enable operations */ |
writel (OHCI_USB_OPER, &ohci->regs->control); |
(void) readl (&ohci->regs->control); |
mdelay (3); |
spin_lock_irq (&ohci->lock); |
ohci->hc_control = OHCI_CONTROL_INIT | OHCI_USB_OPER; |
if (!ohci->ed_rm_list) { |
if (ohci->ed_controltail) |
ohci->hc_control |= OHCI_CTRL_CLE; |
if (ohci->ed_bulktail) |
ohci->hc_control |= OHCI_CTRL_BLE; |
} |
hcd->state = USB_STATE_RUNNING; |
writel (ohci->hc_control, &ohci->regs->control); |
/* trigger a start-frame interrupt (why?) */ |
writel (OHCI_INTR_SF, &ohci->regs->intrstatus); |
writel (OHCI_INTR_SF, &ohci->regs->intrenable); |
writel (OHCI_INTR_WDH, &ohci->regs->intrdisable); |
(void) readl (&ohci->regs->intrdisable); |
spin_unlock_irq (&ohci->lock); |
#ifdef CONFIG_PMAC_PBOOK |
if (_machine == _MACH_Pmac) |
enable_irq (hcd->pdev->irq); |
#endif |
/* Check for a pending done list */ |
if (ohci->hcca->done_head) |
dl_done_list (ohci, dl_reverse_done_list (ohci), NULL); |
writel (OHCI_INTR_WDH, &ohci->regs->intrenable); |
/* assume there are TDs on the bulk and control lists */ |
writel (OHCI_BLF | OHCI_CLF, &ohci->regs->cmdstatus); |
break; |
default: |
ohci_warn (ohci, "odd PCI resume\n"); |
} |
return retval; |
} |
#endif /* CONFIG_PM */ |
/*-------------------------------------------------------------------------*/ |
/* Operations handed to the generic HCD core (via pci_ids.driver_data). */
static const struct hc_driver ohci_pci_hc_driver = {
.description = hcd_name,
/*
 * generic hardware linkage
 */
.irq = ohci_irq,
.flags = HCD_MEMORY | HCD_USB11,
/*
 * basic lifecycle operations
 */
.reset = ohci_pci_reset,
.start = ohci_pci_start,
#ifdef CONFIG_PM
.suspend = ohci_pci_suspend,
.resume = ohci_pci_resume,
#endif
.stop = ohci_stop,
/*
 * memory lifecycle (except per-request)
 */
.hcd_alloc = ohci_hcd_alloc,
.hcd_free = ohci_hcd_free,
/*
 * managing i/o requests and associated device resources
 */
.urb_enqueue = ohci_urb_enqueue,
.urb_dequeue = ohci_urb_dequeue,
.endpoint_disable = ohci_endpoint_disable,
/*
 * scheduling support
 */
.get_frame_number = ohci_get_frame,
/*
 * root hub support
 */
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
};
/*-------------------------------------------------------------------------*/ |
/* Match by PCI class code: serial-bus/USB with prog-if 0x10 (OHCI). */
static const struct pci_device_id pci_ids [] = { {
/* handle any USB OHCI controller */
PCI_DEVICE_CLASS((PCI_CLASS_SERIAL_USB << 8) | 0x10, ~0),
.driver_data = (unsigned long) &ohci_pci_hc_driver,
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
/* pci driver glue; this is a "new style" PCI driver module */
/* probe/remove (and PM hooks) are delegated to the generic HCD-PCI core. */
static struct pci_driver ohci_pci_driver = {
.name = (char *) hcd_name,
.id_table = pci_ids,
.probe = usb_hcd_pci_probe,
.remove = usb_hcd_pci_remove,
#ifdef CONFIG_PM
.suspend = usb_hcd_pci_suspend,
.resume = usb_hcd_pci_resume,
#endif
};
/*
 * Module entry: log driver/block-size info and register the PCI
 * driver.  Returns -ENODEV when USB support is globally disabled.
 * (Left non-static on purpose -- see the commented markers; the SHARK
 * build apparently calls it directly.)
 */
/*static*/ int __init ohci_hcd_pci_init (void)
{
printk (KERN_DEBUG "%s: " DRIVER_INFO " (PCI)\n", hcd_name);
if (usb_disabled())
return -ENODEV;
printk (KERN_DEBUG "%s: block sizes: ed %Zd td %Zd\n", hcd_name,
sizeof (struct ed), sizeof (struct td));
//*** printk (KERN_DEBUG "File: %s @Line:%d\n", __FILE__, __LINE__);
return pci_module_init (&ohci_pci_driver);
}
module_init (ohci_hcd_pci_init); |
/*-------------------------------------------------------------------------*/ |
/* Module exit: unregister the PCI driver (non-static/__exit kept off
 * deliberately, per the commented markers). */
/*static*/ void /*__exit*/ ohci_hcd_pci_cleanup (void)
{
pci_unregister_driver (&ohci_pci_driver);
}
module_exit (ohci_hcd_pci_cleanup); |
/* |
* OHCI HCD (Host Controller Driver) for USB. |
* |
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at> |
* (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net> |
* |
* [ Initialisation is based on Linus' ] |
* [ uhci code and gregs ohci fragments ] |
* [ (C) Copyright 1999 Linus Torvalds ] |
* [ (C) Copyright 1999 Gregory P. Smith] |
* |
* PCI Bus Glue |
* |
* This file is licenced under the GPL. |
*/ |
#ifdef CONFIG_PMAC_PBOOK |
#include <asm/machdep.h> |
#include <asm/pmac_feature.h> |
#include <asm/pci-bridge.h> |
#include <asm/prom.h> |
#ifndef CONFIG_PM |
# define CONFIG_PM |
#endif |
#endif |
#ifndef CONFIG_PCI |
#error "This file is PCI bus glue. CONFIG_PCI must be defined." |
#endif |
/*-------------------------------------------------------------------------*/ |
/*
 * ohci_pci_reset - HCD "reset" entry point: remember the mapped
 * register window and bring the controller to a clean reset state.
 */
static int
ohci_pci_reset (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
ohci->regs = hcd->regs;
return hc_reset (ohci);
}
/*
 * ohci_pci_start - HCD "start" entry point for the PCI glue
 *
 * Allocates the DMA-consistent HCCA (via the _usb allocator variant),
 * applies per-vendor quirks (AMD 756, OPTi, NSC 87560 SuperIO), sets
 * up the ED/TD pools, and starts the controller.  Returns 0, -ENOMEM,
 * -EBUSY, or the ohci_mem_init() error; stops the HC on failure.
 */
static int __devinit
ohci_pci_start (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
int ret;
if (hcd->pdev) {
ohci->hcca = pci_alloc_consistent_usb (hcd->pdev,
sizeof *ohci->hcca, &ohci->hcca_dma);
if (!ohci->hcca)
return -ENOMEM;
/* AMD 756, for most chips (early revs), corrupts register
 * values on read ... so enable the vendor workaround.
 */
if (hcd->pdev->vendor == PCI_VENDOR_ID_AMD
&& hcd->pdev->device == 0x740c) {
ohci->flags = OHCI_QUIRK_AMD756;
ohci_info (ohci, "AMD756 erratum 4 workaround\n");
}
/* FIXME for some of the early AMD 760 southbridges, OHCI
 * won't work at all. blacklist them.
 */
/* Apple's OHCI driver has a lot of bizarre workarounds
 * for this chip. Evidently control and bulk lists
 * can get confused. (B&W G3 models, and ...)
 */
else if (hcd->pdev->vendor == PCI_VENDOR_ID_OPTI
&& hcd->pdev->device == 0xc861) {
ohci_info (ohci,
"WARNING: OPTi workarounds unavailable\n");
}
/* Check for NSC87560. We have to look at the bridge (fn1) to
 * identify the USB (fn2). This quirk might apply to more or
 * even all NSC stuff.
 */
else if (hcd->pdev->vendor == PCI_VENDOR_ID_NS) {
struct pci_dev *b, *hc;
hc = hcd->pdev;
b = pci_find_slot (hc->bus->number,
PCI_DEVFN (PCI_SLOT (hc->devfn), 1));
if (b && b->device == PCI_DEVICE_ID_NS_87560_LIO
&& b->vendor == PCI_VENDOR_ID_NS) {
ohci->flags |= OHCI_QUIRK_SUPERIO;
ohci_info (ohci, "Using NSC SuperIO setup\n");
}
}
}
/* NOTE(review): this memset sits outside the hcd->pdev guard above;
 * if pdev were ever NULL here, ohci->hcca would be unset and this
 * would dereference NULL.  The PCI glue presumably guarantees a
 * valid pdev -- confirm before reusing this path elsewhere.
 */
memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
if ((ret = ohci_mem_init (ohci)) < 0) {
ohci_stop (hcd);
return ret;
}
if (hc_start (ohci) < 0) {
ohci_err (ohci, "can't start\n");
ohci_stop (hcd);
return -EBUSY;
}
create_debug_files (ohci);
#ifdef DEBUG
ohci_dump (ohci, 1);
#endif
return 0;
}
#ifdef CONFIG_PM |
/*
 * ohci_pci_suspend - move an operational controller to USB SUSPEND
 * @hcd:   generic HCD state for this controller
 * @state: requested PM state (only logged)
 *
 * Stops ED list processing, waits out the current frame, enables the
 * remote-wakeup (resume-detect) interrupt, sets HCFS to USB_SUSPEND
 * and checks that it stuck.  Bus mastering is disabled afterwards
 * because some chips (Apple OHCI) have trashed memory by DMA'ing
 * during sleep.  Returns 0, or -EIO when not operational.  Sleeps.
 */
static int ohci_pci_suspend (struct usb_hcd *hcd, u32 state)
{
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
	u16			cmd;
	u32			tmp;

	if ((ohci->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_OPER) {
		ohci_dbg (ohci, "can't suspend (state is %s)\n",
			hcfs2string (ohci->hc_control & OHCI_CTRL_HCFS));
		return -EIO;
	}

	/* act as if usb suspend can always be used */
	ohci_dbg (ohci, "suspend to %d\n", state);

	/* First stop processing */
	spin_lock_irq (&ohci->lock);
	ohci->hc_control &=
		~(OHCI_CTRL_PLE|OHCI_CTRL_CLE|OHCI_CTRL_BLE|OHCI_CTRL_IE);
	writel (ohci->hc_control, &ohci->regs->control);
	writel (OHCI_INTR_SF, &ohci->regs->intrstatus);
	(void) readl (&ohci->regs->intrstatus);
	spin_unlock_irq (&ohci->lock);

	/* Wait a frame or two */
	mdelay (1);
	/* FIX: was "!readl(...) & OHCI_INTR_SF" -- '!' binds before
	 * '&', so the start-of-frame bit was never actually tested
	 * and the extra frame delay could never happen.
	 */
	if (!(readl (&ohci->regs->intrstatus) & OHCI_INTR_SF))
		mdelay (1);

#ifdef CONFIG_PMAC_PBOOK
	if (_machine == _MACH_Pmac)
		disable_irq (hcd->pdev->irq);
	/* else, 2.4 assumes shared irqs -- don't disable */
#endif

	/* Enable remote wakeup */
	writel (readl (&ohci->regs->intrenable) | OHCI_INTR_RD,
		&ohci->regs->intrenable);

	/* Suspend chip and let things settle down a bit */
	spin_lock_irq (&ohci->lock);
	ohci->hc_control = OHCI_USB_SUSPEND;
	writel (ohci->hc_control, &ohci->regs->control);
	(void) readl (&ohci->regs->control);
	spin_unlock_irq (&ohci->lock);

	set_current_state (TASK_UNINTERRUPTIBLE);
	schedule_timeout (HZ/2);

	/* FIX: was "| OHCI_CTRL_HCFS", which forced the HCFS bits on
	 * instead of extracting the field; mask with '&' so the switch
	 * below actually reflects the controller's functional state.
	 */
	tmp = readl (&ohci->regs->control) & OHCI_CTRL_HCFS;
	switch (tmp) {
	case OHCI_USB_RESET:
	case OHCI_USB_RESUME:
	case OHCI_USB_OPER:
		ohci_err (ohci, "can't suspend; hcfs %d\n", tmp);
		break;
	case OHCI_USB_SUSPEND:
		ohci_dbg (ohci, "suspended\n");
		break;
	}

	/* In some rare situations, Apple's OHCI have happily trashed
	 * memory during sleep. We disable its bus master bit during
	 * suspend
	 */
	pci_read_config_word (hcd->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word (hcd->pdev, PCI_COMMAND, cmd);
#ifdef CONFIG_PMAC_PBOOK
	{
		struct device_node *of_node;

		/* Disable USB PAD & cell clock */
		of_node = pci_device_to_OF_node (hcd->pdev);
		if (of_node)
			pmac_call_feature(PMAC_FTR_USB_ENABLE, of_node, 0, 0);
	}
#endif
	return 0;
}
static int ohci_pci_resume (struct usb_hcd *hcd) |
{ |
struct ohci_hcd *ohci = hcd_to_ohci (hcd); |
int temp; |
int retval = 0; |
#ifdef CONFIG_PMAC_PBOOK |
{ |
struct device_node *of_node; |
/* Re-enable USB PAD & cell clock */ |
of_node = pci_device_to_OF_node (hcd->pdev); |
if (of_node) |
pmac_call_feature (PMAC_FTR_USB_ENABLE, of_node, 0, 1); |
} |
#endif |
/* did we suspend, or were we powered off? */ |
ohci->hc_control = readl (&ohci->regs->control); |
temp = ohci->hc_control & OHCI_CTRL_HCFS; |
#ifdef DEBUG |
/* the registers may look crazy here */ |
ohci_dump_status (ohci, 0, 0); |
#endif |
/* Re-enable bus mastering */ |
pci_set_master (ohci->hcd.pdev); |
switch (temp) { |
case OHCI_USB_RESET: // lost power |
restart: |
ohci_info (ohci, "USB restart\n"); |
retval = hc_restart (ohci); |
break; |
case OHCI_USB_SUSPEND: // host wakeup |
case OHCI_USB_RESUME: // remote wakeup |
ohci_info (ohci, "USB continue from %s wakeup\n", |
(temp == OHCI_USB_SUSPEND) |
? "host" : "remote"); |
/* we "should" only need RESUME if we're SUSPENDed ... */ |
ohci->hc_control = OHCI_USB_RESUME; |
writel (ohci->hc_control, &ohci->regs->control); |
(void) readl (&ohci->regs->control); |
/* Some controllers (lucent) need extra-long delays */ |
mdelay (35); /* no schedule here ! */ |
temp = readl (&ohci->regs->control); |
temp = ohci->hc_control & OHCI_CTRL_HCFS; |
if (temp != OHCI_USB_RESUME) { |
ohci_err (ohci, "controller won't resume\n"); |
/* maybe we can reset */ |
goto restart; |
} |
/* Then re-enable operations */ |
writel (OHCI_USB_OPER, &ohci->regs->control); |
(void) readl (&ohci->regs->control); |
mdelay (3); |
spin_lock_irq (&ohci->lock); |
ohci->hc_control = OHCI_CONTROL_INIT | OHCI_USB_OPER; |
if (!ohci->ed_rm_list) { |
if (ohci->ed_controltail) |
ohci->hc_control |= OHCI_CTRL_CLE; |
if (ohci->ed_bulktail) |
ohci->hc_control |= OHCI_CTRL_BLE; |
} |
hcd->state = USB_STATE_RUNNING; |
writel (ohci->hc_control, &ohci->regs->control); |
/* trigger a start-frame interrupt (why?) */ |
writel (OHCI_INTR_SF, &ohci->regs->intrstatus); |
writel (OHCI_INTR_SF, &ohci->regs->intrenable); |
writel (OHCI_INTR_WDH, &ohci->regs->intrdisable); |
(void) readl (&ohci->regs->intrdisable); |
spin_unlock_irq (&ohci->lock); |
#ifdef CONFIG_PMAC_PBOOK |
if (_machine == _MACH_Pmac) |
enable_irq (hcd->pdev->irq); |
#endif |
/* Check for a pending done list */ |
if (ohci->hcca->done_head) |
dl_done_list (ohci, dl_reverse_done_list (ohci), NULL); |
writel (OHCI_INTR_WDH, &ohci->regs->intrenable); |
/* assume there are TDs on the bulk and control lists */ |
writel (OHCI_BLF | OHCI_CLF, &ohci->regs->cmdstatus); |
break; |
default: |
ohci_warn (ohci, "odd PCI resume\n"); |
} |
return retval; |
} |
#endif /* CONFIG_PM */ |
/*-------------------------------------------------------------------------*/ |
/* Operations handed to the generic HCD core (via pci_ids.driver_data). */
static const struct hc_driver ohci_pci_hc_driver = {
.description = hcd_name,
/*
 * generic hardware linkage
 */
.irq = ohci_irq,
.flags = HCD_MEMORY | HCD_USB11,
/*
 * basic lifecycle operations
 */
.reset = ohci_pci_reset,
.start = ohci_pci_start,
#ifdef CONFIG_PM
.suspend = ohci_pci_suspend,
.resume = ohci_pci_resume,
#endif
.stop = ohci_stop,
/*
 * memory lifecycle (except per-request)
 */
.hcd_alloc = ohci_hcd_alloc,
.hcd_free = ohci_hcd_free,
/*
 * managing i/o requests and associated device resources
 */
.urb_enqueue = ohci_urb_enqueue,
.urb_dequeue = ohci_urb_dequeue,
.endpoint_disable = ohci_endpoint_disable,
/*
 * scheduling support
 */
.get_frame_number = ohci_get_frame,
/*
 * root hub support
 */
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
};
/*-------------------------------------------------------------------------*/ |
/* Match by PCI class code: serial-bus/USB with prog-if 0x10 (OHCI). */
static const struct pci_device_id pci_ids [] = { {
/* handle any USB OHCI controller */
PCI_DEVICE_CLASS((PCI_CLASS_SERIAL_USB << 8) | 0x10, ~0),
.driver_data = (unsigned long) &ohci_pci_hc_driver,
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
/* pci driver glue; this is a "new style" PCI driver module */
/* probe/remove (and PM hooks) are delegated to the generic HCD-PCI core. */
static struct pci_driver ohci_pci_driver = {
.name = (char *) hcd_name,
.id_table = pci_ids,
.probe = usb_hcd_pci_probe,
.remove = usb_hcd_pci_remove,
#ifdef CONFIG_PM
.suspend = usb_hcd_pci_suspend,
.resume = usb_hcd_pci_resume,
#endif
};
/*
 * Module entry: log driver/block-size info and register the PCI
 * driver.  Returns -ENODEV when USB support is globally disabled.
 * (Left non-static on purpose -- see the commented markers; the SHARK
 * build apparently calls it directly.)
 */
/*static*/ int __init ohci_hcd_pci_init (void)
{
printk (KERN_DEBUG "%s: " DRIVER_INFO " (PCI)\n", hcd_name);
if (usb_disabled())
return -ENODEV;
printk (KERN_DEBUG "%s: block sizes: ed %Zd td %Zd\n", hcd_name,
sizeof (struct ed), sizeof (struct td));
//*** printk (KERN_DEBUG "File: %s @Line:%d\n", __FILE__, __LINE__);
return pci_module_init (&ohci_pci_driver);
}
module_init (ohci_hcd_pci_init); |
/*-------------------------------------------------------------------------*/ |
/* Module exit: unregister the PCI driver (non-static/__exit kept off
 * deliberately, per the commented markers). */
/*static*/ void /*__exit*/ ohci_hcd_pci_cleanup (void)
{
pci_unregister_driver (&ohci_pci_driver);
}
module_exit (ohci_hcd_pci_cleanup); |
/shark/trunk/drivers/usb/host/ehci-sched.c |
---|
1,1123 → 1,1123 |
/* |
* Copyright (c) 2001-2002 by David Brownell |
* |
* This program is free software; you can redistribute it and/or modify it |
* under the terms of the GNU General Public License as published by the |
* Free Software Foundation; either version 2 of the License, or (at your |
* option) any later version. |
* |
* This program is distributed in the hope that it will be useful, but |
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY |
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
* for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software Foundation, |
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
*/ |
/* this file is part of ehci-hcd.c */ |
/*-------------------------------------------------------------------------*/ |
/* |
* EHCI scheduled transaction support: interrupt, iso, split iso |
* These are called "periodic" transactions in the EHCI spec. |
* |
* Note that for interrupt transfers, the QH/QTD manipulation is shared |
* with the "asynchronous" transaction support (control/bulk transfers). |
* The only real difference is in how interrupt transfers are scheduled. |
* We get some funky API restrictions from the current URB model, which |
* works notably better for reading transfers than for writing. (And |
* which accordingly needs to change before it'll work inside devices, |
* or with "USB On The Go" additions to USB 2.0 ...) |
*/ |
static int ehci_get_frame (struct usb_hcd *hcd); |
/*-------------------------------------------------------------------------*/ |
/* |
* periodic_next_shadow - return "next" pointer on shadow list |
* @periodic: host pointer to qh/itd/sitd |
* @tag: hardware tag for type of this record |
*/ |
/*
 * Map a hardware type tag to the matching "next" pointer inside the
 * shadow entry.  Returns 0 for an unrecognized tag; note callers
 * dereference the result, so a bad tag would oops -- the BUG() below
 * was deliberately commented out by the original authors.
 */
static union ehci_shadow *
periodic_next_shadow (union ehci_shadow *periodic, int tag)
{
switch (tag) {
case Q_TYPE_QH:
return &periodic->qh->qh_next;
case Q_TYPE_FSTN:
return &periodic->fstn->fstn_next;
case Q_TYPE_ITD:
return &periodic->itd->itd_next;
#ifdef have_split_iso
case Q_TYPE_SITD:
return &periodic->sitd->sitd_next;
#endif /* have_split_iso */
}
dbg ("BAD shadow %p tag %d", periodic->ptr, tag);
// BUG ();
return 0;
}
/* returns true after successful unlink */ |
/* caller must hold ehci->lock */ |
/*
 * periodic_unlink - remove @ptr from one frame's periodic list
 * @frame: index into the periodic frame table
 * @ptr:   the qh/itd/sitd/fstn to unlink
 *
 * Walks hardware and shadow lists in lockstep, patches the hardware
 * next pointer first (the HC may still hold the old entry in its
 * cache, so the removed structure's own hw_next is left intact), then
 * unhooks the shadow entry.  Returns 1 on success, 0 if @ptr was not
 * found (an interrupt entry at list end may have been shared).
 */
static int periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
union ehci_shadow *prev_p = &ehci->pshadow [frame];
u32 *hw_p = &ehci->periodic [frame];
union ehci_shadow here = *prev_p;
union ehci_shadow *next_p;
/* find predecessor of "ptr"; hw and shadow lists are in sync */
while (here.ptr && here.ptr != ptr) {
prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
/* NOTE(review): here.qh->hw_next is used regardless of the entry's
 * real type; this only works if hw_next sits at the same offset in
 * qh/itd/sitd/fstn -- confirm against the struct layouts.
 */
hw_p = &here.qh->hw_next;
here = *prev_p;
}
/* an interrupt entry (at list end) could have been shared */
if (!here.ptr) {
dbg ("entry %p no longer on frame [%d]", ptr, frame);
return 0;
}
// vdbg ("periodic unlink %p from frame %d", ptr, frame);
/* update hardware list ... HC may still know the old structure, so
 * don't change hw_next until it'll have purged its cache
 */
next_p = periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
*hw_p = here.qh->hw_next;
/* unlink from shadow list; HCD won't see old structure again */
*prev_p = *next_p;
next_p->ptr = 0;
return 1;
}
/* how many of the uframe's 125 usecs are allocated? */ |
static unsigned short |
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe) |
{ |
u32 *hw_p = &ehci->periodic [frame]; |
union ehci_shadow *q = &ehci->pshadow [frame]; |
unsigned usecs = 0; |
while (q->ptr) { |
switch (Q_NEXT_TYPE (*hw_p)) { |
case Q_TYPE_QH: |
/* is it in the S-mask? */ |
if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe)) |
usecs += q->qh->usecs; |
/* ... or C-mask? */ |
if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe))) |
usecs += q->qh->c_usecs; |
q = &q->qh->qh_next; |
break; |
case Q_TYPE_FSTN: |
/* for "save place" FSTNs, count the relevant INTR |
* bandwidth from the previous frame |
*/ |
if (q->fstn->hw_prev != EHCI_LIST_END) { |
dbg ("not counting FSTN bandwidth yet ..."); |
} |
q = &q->fstn->fstn_next; |
break; |
case Q_TYPE_ITD: |
/* NOTE the "one uframe per itd" policy */ |
if (q->itd->hw_transaction [uframe] != 0) |
usecs += q->itd->usecs; |
q = &q->itd->itd_next; |
break; |
#ifdef have_split_iso |
case Q_TYPE_SITD: |
temp = q->sitd->hw_fullspeed_ep & |
__constant_cpu_to_le32 (1 << 31); |
// FIXME: this doesn't count data bytes right... |
/* is it in the S-mask? (count SPLIT, DATA) */ |
if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) { |
if (temp) |
usecs += HS_USECS (188); |
else |
usecs += HS_USECS (1); |
} |
/* ... C-mask? (count CSPLIT, DATA) */ |
if (q->sitd->hw_uframe & |
cpu_to_le32 (1 << (8 + uframe))) { |
if (temp) |
usecs += HS_USECS (0); |
else |
usecs += HS_USECS (188); |
} |
q = &q->sitd->sitd_next; |
break; |
#endif /* have_split_iso */ |
default: |
BUG (); |
} |
} |
#ifdef DEBUG |
if (usecs > 100) |
err ("overallocated uframe %d, periodic is %d usecs", |
frame * 8 + uframe, usecs); |
#endif |
return usecs; |
} |
/*-------------------------------------------------------------------------*/ |
/*
 * enable_periodic - set CMD_PSE so the HC walks the periodic frame
 * list, and record where ehci_work should scan next.  Halts the HCD
 * state and returns the handshake error on timeout.
 */
static int enable_periodic (struct ehci_hcd *ehci)
{
u32 cmd;
int status;
/* did a previous clearing of PSE take effect yet?
 * takes effect only at frame boundaries...
 */
status = handshake (&ehci->regs->status, STS_PSS, 0, 9 * 125);
if (status != 0) {
ehci->hcd.state = USB_STATE_HALT;
return status;
}
cmd = readl (&ehci->regs->command) | CMD_PSE;
writel (cmd, &ehci->regs->command);
/* posted write ... PSS happens later */
ehci->hcd.state = USB_STATE_RUNNING;
/* make sure ehci_work scans these */
ehci->next_uframe = readl (&ehci->regs->frame_index)
% (ehci->periodic_size << 3);
return 0;
}
/*
 * disable_periodic - clear CMD_PSE to stop periodic-list processing
 * and mark next_uframe invalid.  Halts the HCD state and returns the
 * handshake error on timeout.
 */
static int disable_periodic (struct ehci_hcd *ehci)
{
u32 cmd;
int status;
/* did a previous setting of PSE take effect yet?
 * takes effect only at frame boundaries...
 */
status = handshake (&ehci->regs->status, STS_PSS, STS_PSS, 9 * 125);
if (status != 0) {
ehci->hcd.state = USB_STATE_HALT;
return status;
}
cmd = readl (&ehci->regs->command) & ~CMD_PSE;
writel (cmd, &ehci->regs->command);
/* posted write ... */
ehci->next_uframe = -1;
return 0;
}
/*-------------------------------------------------------------------------*/ |
// FIXME microframe periods not yet handled |
/*
 * intr_deschedule - unlink an interrupt QH from every frame it
 * occupies, dropping one QH reference per frame.
 * @wait: nonzero means spin one microframe so the HC is surely done
 *        with the QH before it is marked IDLE.
 *
 * Also turns off the periodic schedule when this was the last
 * scheduled entry, and gives the bandwidth back to usbfs accounting.
 */
static void intr_deschedule (
struct ehci_hcd *ehci,
struct ehci_qh *qh,
int wait
) {
int status;
unsigned frame = qh->start;
do {
periodic_unlink (ehci, frame, qh);
qh_put (ehci, qh);
frame += qh->period;
} while (frame < ehci->periodic_size);
qh->qh_state = QH_STATE_UNLINK;
qh->qh_next.ptr = 0;
ehci->periodic_sched--;
/* maybe turn off periodic schedule */
/* NOTE(review): 'status' is assigned but never checked afterwards */
if (!ehci->periodic_sched)
status = disable_periodic (ehci);
else {
status = 0;
vdbg ("periodic schedule still enabled");
}
/*
 * If the hc may be looking at this qh, then delay a uframe
 * (yeech!) to be sure it's done.
 * No other threads may be mucking with this qh.
 */
if (((ehci_get_frame (&ehci->hcd) - frame) % qh->period) == 0) {
if (wait) {
udelay (125);
qh->hw_next = EHCI_LIST_END;
} else {
/* we may not be IDLE yet, but if the qh is empty
 * the race is very short. then if qh also isn't
 * rescheduled soon, it won't matter. otherwise...
 */
vdbg ("intr_deschedule...");
}
} else
qh->hw_next = EHCI_LIST_END;
qh->qh_state = QH_STATE_IDLE;
/* update per-qh bandwidth utilization (for usbfs) */
hcd_to_bus (&ehci->hcd)->bandwidth_allocated -=
(qh->usecs + qh->c_usecs) / qh->period;
dbg ("descheduled qh %p, period = %d frame = %d count = %d, urbs = %d",
qh, qh->period, frame,
atomic_read (&qh->refcount), ehci->periodic_sched);
}
static int check_period ( |
struct ehci_hcd *ehci, |
unsigned frame, |
unsigned uframe, |
unsigned period, |
unsigned usecs |
) { |
/* complete split running into next frame? |
* given FSTN support, we could sometimes check... |
*/ |
if (uframe >= 8) |
return 0; |
/* |
* 80% periodic == 100 usec/uframe available |
* convert "usecs we need" to "max already claimed" |
*/ |
usecs = 100 - usecs; |
do { |
int claimed; |
// FIXME delete when intr_submit handles non-empty queues |
// this gives us a one intr/frame limit (vs N/uframe) |
// ... and also lets us avoid tracking split transactions |
// that might collide at a given TT/hub. |
if (ehci->pshadow [frame].ptr) |
return 0; |
claimed = periodic_usecs (ehci, frame, uframe); |
if (claimed > usecs) |
return 0; |
// FIXME update to handle sub-frame periods |
} while ((frame += period) < ehci->periodic_size); |
// success! |
return 1; |
} |
static int check_intr_schedule ( |
struct ehci_hcd *ehci, |
unsigned frame, |
unsigned uframe, |
const struct ehci_qh *qh, |
u32 *c_maskp |
) |
{ |
int retval = -ENOSPC; |
if (!check_period (ehci, frame, uframe, qh->period, qh->usecs)) |
goto done; |
if (!qh->c_usecs) { |
retval = 0; |
*c_maskp = cpu_to_le32 (0); |
goto done; |
} |
/* This is a split transaction; check the bandwidth available for |
* the completion too. Check both worst and best case gaps: worst |
* case is SPLIT near uframe end, and CSPLIT near start ... best is |
* vice versa. Difference can be almost two uframe times, but we |
* reserve unnecessary bandwidth (waste it) this way. (Actually |
* even better cases exist, like immediate device NAK.) |
* |
* FIXME don't even bother unless we know this TT is idle in that |
* range of uframes ... for now, check_period() allows only one |
* interrupt transfer per frame, so needn't check "TT busy" status |
* when scheduling a split (QH, SITD, or FSTN). |
* |
* FIXME ehci 0.96 and above can use FSTNs |
*/ |
if (!check_period (ehci, frame, uframe + qh->gap_uf + 1, |
qh->period, qh->c_usecs)) |
goto done; |
if (!check_period (ehci, frame, uframe + qh->gap_uf, |
qh->period, qh->c_usecs)) |
goto done; |
*c_maskp = cpu_to_le32 (0x03 << (8 + uframe + qh->gap_uf)); |
retval = 0; |
done: |
return retval; |
} |
/* Find (or reuse) a periodic slot with enough bandwidth for this
 * interrupt qh, then link it into every frame its period covers.
 * Returns 0 or -ENOSPC.
 * NOTE(review): called under ehci->lock (see intr_submit) -- confirm.
 */
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		status;
	unsigned	uframe;
	u32		c_mask;
	unsigned	frame;		/* 0..(qh->period - 1), or NO_FRAME */

	qh->hw_next = EHCI_LIST_END;
	frame = qh->start;

	/* reuse the previous schedule slots, if we can */
	if (frame < qh->period) {
		/* recover uframe from the S-mask low byte; ffs() is
		 * 1-based, hence the --uframe in the call below
		 */
		uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff);
		status = check_intr_schedule (ehci, frame, --uframe,
				qh, &c_mask);
	} else {
		uframe = 0;
		c_mask = 0;
		status = -ENOSPC;
	}

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	if (status) {
		frame = qh->period - 1;
		do {
			for (uframe = 0; uframe < 8; uframe++) {
				status = check_intr_schedule (ehci,
						frame, uframe, qh,
						&c_mask);
				if (status == 0)
					break;
			}
		} while (status && frame--);
		if (status)
			goto done;
		qh->start = frame;

		/* reset S-frame and (maybe) C-frame masks */
		/* NOTE(review): mask constant lacks cpu_to_le32, unlike
		 * the OR below -- correct only on little-endian hosts?
		 * Confirm.
		 */
		qh->hw_info2 &= ~0xffff;
		qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
	} else
		dbg ("reused previous qh %p schedule", qh);

	/* stuff into the periodic schedule */
	qh->qh_state = QH_STATE_LINKED;
	dbg ("scheduled qh %p usecs %d/%d period %d.0 starting %d.%d (gap %d)",
		qh, qh->usecs, qh->c_usecs,
		qh->period, frame, uframe, qh->gap_uf);
	do {
		if (unlikely (ehci->pshadow [frame].ptr != 0)) {
// FIXME -- just link toward the end, before any qh with a shorter period,
// AND accommodate it already having been linked here (after some other qh)
// AS WELL AS updating the schedule checking logic
			BUG ();
		} else {
			/* software shadow first, then hardware link */
			ehci->pshadow [frame].qh = qh_get (qh);
			ehci->periodic [frame] =
				QH_NEXT (qh->qh_dma);
		}
		wmb ();
		frame += qh->period;
	} while (frame < ehci->periodic_size);

	/* update per-qh bandwidth for usbfs */
	hcd_to_bus (&ehci->hcd)->bandwidth_allocated +=
		(qh->usecs + qh->c_usecs) / qh->period;

	/* maybe enable periodic schedule processing */
	if (!ehci->periodic_sched++)
		status = enable_periodic (ehci);
done:
	return status;
}
/* Submit an interrupt urb: look up (or create and schedule) its qh,
 * then queue the caller-prepared qtds onto it.  Returns 0 or a
 * negative errno; on error the qtd list is freed here.
 */
static int intr_submit (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			mem_flags
) {
	unsigned		epnum;
	unsigned long		flags;
	struct ehci_qh		*qh;
	struct hcd_dev		*dev;
	int			is_input;
	int			status = 0;
	struct list_head	empty;

	/* get endpoint and transfer/schedule data */
	epnum = usb_pipeendpoint (urb->pipe);
	is_input = usb_pipein (urb->pipe);
	if (is_input)
		epnum |= 0x10;

	spin_lock_irqsave (&ehci->lock, flags);
	dev = (struct hcd_dev *)urb->dev->hcpriv;

	/* get qh and force any scheduling errors: append an EMPTY list
	 * first, so a schedule failure leaves nothing queued
	 */
	INIT_LIST_HEAD (&empty);
	qh = qh_append_tds (ehci, urb, &empty, epnum, &dev->ep [epnum]);
	if (qh == 0) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		if ((status = qh_schedule (ehci, qh)) != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
	BUG_ON (qh == 0);

	/* ... update usbfs periodic stats */
	hcd_to_bus (&ehci->hcd)->bandwidth_int_reqs++;

done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (status)
		qtd_list_free (ehci, urb, qtd_list);

	return status;
}
static unsigned |
intr_complete ( |
struct ehci_hcd *ehci, |
unsigned frame, |
struct ehci_qh *qh, |
struct pt_regs *regs |
) { |
unsigned count; |
/* nothing to report? */ |
if (likely ((qh->hw_token & __constant_cpu_to_le32 (QTD_STS_ACTIVE)) |
!= 0)) |
return 0; |
if (unlikely (list_empty (&qh->qtd_list))) { |
dbg ("intr qh %p no TDs?", qh); |
return 0; |
} |
/* handle any completions */ |
count = qh_completions (ehci, qh, regs); |
if (unlikely (list_empty (&qh->qtd_list))) |
intr_deschedule (ehci, qh, 0); |
return count; |
} |
/*-------------------------------------------------------------------------*/ |
static void |
itd_free_list (struct ehci_hcd *ehci, struct urb *urb) |
{ |
struct ehci_itd *first_itd = urb->hcpriv; |
while (!list_empty (&first_itd->itd_list)) { |
struct ehci_itd *itd; |
itd = list_entry ( |
first_itd->itd_list.next, |
struct ehci_itd, itd_list); |
list_del (&itd->itd_list); |
pci_pool_free (ehci->itd_pool, itd, itd->itd_dma); |
} |
pci_pool_free (ehci->itd_pool, first_itd, first_itd->itd_dma); |
urb->hcpriv = 0; |
} |
static int |
itd_fill ( |
struct ehci_hcd *ehci, |
struct ehci_itd *itd, |
struct urb *urb, |
unsigned index, // urb->iso_frame_desc [index] |
dma_addr_t dma // mapped transfer buffer |
) { |
u64 temp; |
u32 buf1; |
unsigned i, epnum, maxp, multi; |
unsigned length; |
int is_input; |
itd->hw_next = EHCI_LIST_END; |
itd->urb = urb; |
itd->index = index; |
/* tell itd about its transfer buffer, max 2 pages */ |
length = urb->iso_frame_desc [index].length; |
dma += urb->iso_frame_desc [index].offset; |
temp = dma & ~0x0fff; |
for (i = 0; i < 2; i++) { |
itd->hw_bufp [i] = cpu_to_le32 ((u32) temp); |
itd->hw_bufp_hi [i] = cpu_to_le32 ((u32)(temp >> 32)); |
temp += 0x1000; |
} |
itd->buf_dma = dma; |
/* |
* this might be a "high bandwidth" highspeed endpoint, |
* as encoded in the ep descriptor's maxpacket field |
*/ |
epnum = usb_pipeendpoint (urb->pipe); |
is_input = usb_pipein (urb->pipe); |
if (is_input) { |
maxp = urb->dev->epmaxpacketin [epnum]; |
buf1 = (1 << 11); |
} else { |
maxp = urb->dev->epmaxpacketout [epnum]; |
buf1 = 0; |
} |
buf1 |= (maxp & 0x03ff); |
multi = 1; |
multi += (maxp >> 11) & 0x03; |
maxp &= 0x03ff; |
maxp *= multi; |
/* transfer can't fit in any uframe? */ |
if (length < 0 || maxp < length) { |
dbg ("BAD iso packet: %d bytes, max %d, urb %p [%d] (of %d)", |
length, maxp, urb, index, |
urb->iso_frame_desc [index].length); |
return -ENOSPC; |
} |
itd->usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 1, length); |
/* "plus" info in low order bits of buffer pointers */ |
itd->hw_bufp [0] |= cpu_to_le32 ((epnum << 8) | urb->dev->devnum); |
itd->hw_bufp [1] |= cpu_to_le32 (buf1); |
itd->hw_bufp [2] |= cpu_to_le32 (multi); |
/* figure hw_transaction[] value (it's scheduled later) */ |
itd->transaction = EHCI_ISOC_ACTIVE; |
itd->transaction |= dma & 0x0fff; /* offset; buffer=0 */ |
if ((index + 1) == urb->number_of_packets) |
itd->transaction |= EHCI_ITD_IOC; /* end-of-urb irq */ |
itd->transaction |= length << 16; |
cpu_to_le32s (&itd->transaction); |
return 0; |
} |
static int |
itd_urb_transaction ( |
struct ehci_hcd *ehci, |
struct urb *urb, |
int mem_flags |
) { |
int frame_index; |
struct ehci_itd *first_itd, *itd; |
int status; |
dma_addr_t itd_dma; |
/* allocate/init ITDs */ |
for (frame_index = 0, first_itd = 0; |
frame_index < urb->number_of_packets; |
frame_index++) { |
itd = pci_pool_alloc (ehci->itd_pool, mem_flags, &itd_dma); |
if (!itd) { |
status = -ENOMEM; |
goto fail; |
} |
memset (itd, 0, sizeof *itd); |
itd->itd_dma = itd_dma; |
status = itd_fill (ehci, itd, urb, frame_index, |
urb->transfer_dma); |
if (status != 0) |
goto fail; |
if (first_itd) |
list_add_tail (&itd->itd_list, |
&first_itd->itd_list); |
else { |
INIT_LIST_HEAD (&itd->itd_list); |
urb->hcpriv = first_itd = itd; |
} |
} |
urb->error_count = 0; |
return 0; |
fail: |
if (urb->hcpriv) |
itd_free_list (ehci, urb); |
return status; |
} |
/*-------------------------------------------------------------------------*/ |
/* Prepend this itd to frame's periodic list, updating the software
 * shadow before publishing the hardware link so the HCD always tracks
 * whatever the HC can reach.
 */
static inline void
itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
	/* always prepend ITD/SITD ... only QH tree is order-sensitive */
	itd->itd_next = ehci->pshadow [frame];
	itd->hw_next = ehci->periodic [frame];
	ehci->pshadow [frame].itd = itd;
	ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD;
}
/* |
* return zero on success, else -errno |
* - start holds first uframe to start scheduling into |
* - max is the first uframe it's NOT (!) OK to start scheduling into |
* math to be done modulo "mod" (ehci->periodic_size << 3) |
*/ |
static int get_iso_range ( |
struct ehci_hcd *ehci, |
struct urb *urb, |
unsigned *start, |
unsigned *max, |
unsigned mod |
) { |
struct list_head *lh; |
struct hcd_dev *dev = urb->dev->hcpriv; |
int last = -1; |
unsigned now, span, end; |
span = urb->interval * urb->number_of_packets; |
/* first see if we know when the next transfer SHOULD happen */ |
list_for_each (lh, &dev->urb_list) { |
struct urb *u; |
struct ehci_itd *itd; |
unsigned s; |
u = list_entry (lh, struct urb, urb_list); |
if (u == urb || u->pipe != urb->pipe) |
continue; |
if (u->interval != urb->interval) { /* must not change! */ |
dbg ("urb %p interval %d ... != %p interval %d", |
u, u->interval, urb, urb->interval); |
return -EINVAL; |
} |
/* URB for this endpoint... covers through when? */ |
itd = urb->hcpriv; |
s = itd->uframe + u->interval * u->number_of_packets; |
if (last < 0) |
last = s; |
else { |
/* |
* So far we can only queue two ISO URBs... |
* |
* FIXME do interval math, figure out whether |
* this URB is "before" or not ... also, handle |
* the case where the URB might have completed, |
* but hasn't yet been processed. |
*/ |
dbg ("NYET: queue >2 URBs per ISO endpoint"); |
return -EDOM; |
} |
} |
/* calculate the legal range [start,max) */ |
now = readl (&ehci->regs->frame_index) + 1; /* next uframe */ |
if (!ehci->periodic_sched) |
now += 8; /* startup delay */ |
now %= mod; |
end = now + mod; |
if (last < 0) { |
*start = now + ehci->i_thresh + /* paranoia */ 1; |
*max = end - span; |
if (*max < *start + 1) |
*max = *start + 1; |
} else { |
*start = last % mod; |
*max = (last + 1) % mod; |
} |
/* explicit start frame? */ |
if (!(urb->transfer_flags & URB_ISO_ASAP)) { |
unsigned temp; |
/* sanity check: must be in range */ |
urb->start_frame %= ehci->periodic_size; |
temp = urb->start_frame << 3; |
if (temp < *start) |
temp += mod; |
if (temp > *max) |
return -EDOM; |
/* use that explicit start frame */ |
*start = urb->start_frame << 3; |
temp += 8; |
if (temp < *max) |
*max = temp; |
} |
// FIXME minimize wraparound to "now" ... insist max+span |
// (and start+span) remains a few frames short of "end" |
*max %= ehci->periodic_size; |
if ((*start + span) < end) |
return 0; |
return -EFBIG; |
} |
static int |
itd_schedule (struct ehci_hcd *ehci, struct urb *urb) |
{ |
unsigned start, max, i; |
int status; |
unsigned mod = ehci->periodic_size << 3; |
for (i = 0; i < urb->number_of_packets; i++) { |
urb->iso_frame_desc [i].status = -EINPROGRESS; |
urb->iso_frame_desc [i].actual_length = 0; |
} |
if ((status = get_iso_range (ehci, urb, &start, &max, mod)) != 0) |
return status; |
do { |
unsigned uframe; |
unsigned usecs; |
struct ehci_itd *itd; |
/* check schedule: enough space? */ |
itd = urb->hcpriv; |
uframe = start; |
for (i = 0, uframe = start; |
i < urb->number_of_packets; |
i++, uframe += urb->interval) { |
uframe %= mod; |
/* can't commit more than 80% periodic == 100 usec */ |
if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7) |
> (100 - itd->usecs)) { |
itd = 0; |
break; |
} |
itd = list_entry (itd->itd_list.next, |
struct ehci_itd, itd_list); |
} |
if (!itd) |
continue; |
/* that's where we'll schedule this! */ |
itd = urb->hcpriv; |
urb->start_frame = start >> 3; |
vdbg ("ISO urb %p (%d packets period %d) starting %d.%d", |
urb, urb->number_of_packets, urb->interval, |
urb->start_frame, start & 0x7); |
for (i = 0, uframe = start, usecs = 0; |
i < urb->number_of_packets; |
i++, uframe += urb->interval) { |
uframe %= mod; |
itd->uframe = uframe; |
itd->hw_transaction [uframe & 0x07] = itd->transaction; |
itd_link (ehci, (uframe >> 3) % ehci->periodic_size, |
itd); |
wmb (); |
usecs += itd->usecs; |
itd = list_entry (itd->itd_list.next, |
struct ehci_itd, itd_list); |
} |
/* update bandwidth utilization records (for usbfs) |
* |
* FIXME This claims each URB queued to an endpoint, as if |
* transfers were concurrent, not sequential. So bandwidth |
* typically gets double-billed ... comes from tying it to |
* URBs rather than endpoints in the schedule. Luckily we |
* don't use this usbfs data for serious decision making. |
*/ |
usecs /= urb->number_of_packets; |
usecs /= urb->interval; |
usecs >>= 3; |
if (usecs < 1) |
usecs = 1; |
usb_claim_bandwidth (urb->dev, urb, usecs, 1); |
/* maybe enable periodic schedule processing */ |
if (!ehci->periodic_sched++) { |
if ((status = enable_periodic (ehci)) != 0) { |
// FIXME deschedule right away |
err ("itd_schedule, enable = %d", status); |
} |
} |
return 0; |
} while ((start = ++start % mod) != max); |
/* no room in the schedule */ |
dbg ("urb %p, CAN'T SCHEDULE", urb); |
return -ENOSPC; |
} |
/*-------------------------------------------------------------------------*/ |
#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)

/* Record status/length for one uframe's iso transactions; when this is
 * the urb's final packet, free its iTDs and give the urb back to the
 * driver.  Returns nonzero iff the urb was completed here.
 * Called with ehci->lock held (it is dropped around the driver's
 * completion callback below).
 */
static unsigned
itd_complete (
	struct ehci_hcd	*ehci,
	struct ehci_itd	*itd,
	unsigned	uframe,
	struct pt_regs	*regs
) {
	struct urb				*urb = itd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;

	/* update status for this uframe's transfers */
	desc = &urb->iso_frame_desc [itd->index];

	t = itd->hw_transaction [uframe];
	itd->hw_transaction [uframe] = 0;
	if (t & EHCI_ISOC_ACTIVE)
		/* still flagged active: the HC never got to it */
		desc->status = -EXDEV;
	else if (t & ISO_ERRS) {
		urb->error_count++;
		if (t & EHCI_ISOC_BUF_ERR)
			desc->status = usb_pipein (urb->pipe)
				? -ENOSR  /* couldn't read */
				: -ECOMM; /* couldn't write */
		else if (t & EHCI_ISOC_BABBLE)
			desc->status = -EOVERFLOW;
		else /* (t & EHCI_ISOC_XACTERR) */
			desc->status = -EPROTO;

		/* HC need not update length with this error */
		if (!(t & EHCI_ISOC_BABBLE))
			desc->actual_length += EHCI_ITD_LENGTH (t);
	} else {
		desc->status = 0;
		desc->actual_length += EHCI_ITD_LENGTH (t);
	}

	vdbg ("itd %p urb %p packet %d/%d trans %x status %d len %d",
		itd, urb, itd->index + 1, urb->number_of_packets,
		t, desc->status, desc->actual_length);

	/* handle completion now? */
	if ((itd->index + 1) != urb->number_of_packets)
		return 0;

	/*
	 * Always give the urb back to the driver ... expect it to submit
	 * a new urb (or resubmit this), and to have another already queued
	 * when un-interrupted transfers are needed.
	 *
	 * NOTE that for now we don't accelerate ISO unlinks; they just
	 * happen according to the current schedule.  Means a delay of
	 * up to about a second (max).
	 */
	itd_free_list (ehci, urb);
	if (urb->status == -EINPROGRESS)
		urb->status = 0;

	/* complete() can reenter this HCD */
	spin_unlock (&ehci->lock);
	usb_hcd_giveback_urb (&ehci->hcd, urb, regs);
	spin_lock (&ehci->lock);

	/* defer stopping schedule; completion can submit */
	ehci->periodic_sched--;
	if (!ehci->periodic_sched)
		(void) disable_periodic (ehci);

	return 1;
}
/*-------------------------------------------------------------------------*/ |
/* Submit a highspeed iso urb: build its iTD chain without the lock,
 * then schedule it under ehci->lock.  Returns 0 or a negative errno;
 * on scheduling failure the iTDs are freed here.
 */
static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
{
	int		status;
	unsigned long	flags;

	dbg ("itd_submit urb %p", urb);

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction (ehci, urb, mem_flags);
	if (status < 0)
		return status;

	/* schedule ... need to lock */
	spin_lock_irqsave (&ehci->lock, flags);
	status = itd_schedule (ehci, urb);
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (status < 0)
		itd_free_list (ehci, urb);

	return status;
}
#ifdef have_split_iso |
/*-------------------------------------------------------------------------*/ |
/* |
* "Split ISO TDs" ... used for USB 1.1 devices going through |
* the TTs in USB 2.0 hubs. |
* |
* FIXME not yet implemented |
*/ |
#endif /* have_split_iso */ |
/*-------------------------------------------------------------------------*/ |
/* Scan the periodic schedule for completions, from the last scan point
 * up to the HC's current frame index -- or over everything left, when
 * the HC is no longer running.  Interrupt QHs are polled for finished
 * qtds; iTDs are completed and unlinked one processed uframe at a time.
 * NOTE(review): runs with ehci->lock held (itd_complete drops and
 * retakes it) -- confirm at the caller.
 */
static void
scan_periodic (struct ehci_hcd *ehci, struct pt_regs *regs)
{
	unsigned	frame, clock, now_uframe, mod;
	unsigned	count = 0;

	mod = ehci->periodic_size << 3;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 * Don't scan ISO entries more than once, though.
	 */
	frame = ehci->next_uframe >> 3;
	if (HCD_IS_RUNNING (ehci->hcd.state))
		now_uframe = readl (&ehci->regs->frame_index);
	else
		now_uframe = (frame << 3) - 1;
	now_uframe %= mod;
	clock = now_uframe >> 3;

	for (;;) {
		union ehci_shadow	q, *q_p;
		u32			type, *hw_p;
		unsigned		uframes;

restart:
		/* scan schedule to _before_ current frame index */
		if (frame == clock)
			uframes = now_uframe & 0x07;
		else
			uframes = 8;

		q_p = &ehci->pshadow [frame];
		hw_p = &ehci->periodic [frame];
		q.ptr = q_p->ptr;
		type = Q_NEXT_TYPE (*hw_p);

		/* scan each element in frame's queue for completions */
		while (q.ptr != 0) {
			int			last;
			unsigned		uf;
			union ehci_shadow	temp;

			switch (type) {
			case Q_TYPE_QH:
				last = (q.qh->hw_next == EHCI_LIST_END);
				temp = q.qh->qh_next;
				type = Q_NEXT_TYPE (q.qh->hw_next);
				/* hold a ref across completion, which may
				 * deschedule and recycle the qh
				 */
				count += intr_complete (ehci, frame,
						qh_get (q.qh), regs);
				qh_put (ehci, q.qh);
				q = temp;
				break;
			case Q_TYPE_FSTN:
				last = (q.fstn->hw_next == EHCI_LIST_END);
				/* for "save place" FSTNs, look at QH entries
				 * in the previous frame for completions.
				 */
				if (q.fstn->hw_prev != EHCI_LIST_END) {
					dbg ("ignoring completions from FSTNs");
				}
				type = Q_NEXT_TYPE (q.fstn->hw_next);
				q = q.fstn->fstn_next;
				break;
			case Q_TYPE_ITD:
				last = (q.itd->hw_next == EHCI_LIST_END);

				/* Unlink each (S)ITD we see, since the ISO
				 * URB model forces constant rescheduling.
				 * That complicates sharing uframes in ITDs,
				 * and means we need to skip uframes the HC
				 * hasn't yet processed.
				 */
				for (uf = 0; uf < uframes; uf++) {
					if (q.itd->hw_transaction [uf] != 0) {
						/* unlink (hw first), then
						 * complete this itd
						 */
						temp = q;
						*q_p = q.itd->itd_next;
						*hw_p = q.itd->hw_next;
						type = Q_NEXT_TYPE (*hw_p);

						/* might free q.itd ... */
						count += itd_complete (ehci,
							temp.itd, uf, regs);
						break;
					}
				}
				/* we might skip this ITD's uframe ... */
				if (uf == uframes) {
					q_p = &q.itd->itd_next;
					hw_p = &q.itd->hw_next;
					type = Q_NEXT_TYPE (q.itd->hw_next);
				}

				q = *q_p;
				break;
#ifdef have_split_iso
			case Q_TYPE_SITD:
				last = (q.sitd->hw_next == EHCI_LIST_END);
				sitd_complete (ehci, q.sitd);
				type = Q_NEXT_TYPE (q.sitd->hw_next);

				// FIXME unlink SITD after split completes
				q = q.sitd->sitd_next;
				break;
#endif /* have_split_iso */
			default:
				dbg ("corrupt type %d frame %d shadow %p",
					type, frame, q.ptr);
				// BUG ();
				last = 1;
				q.ptr = 0;
			}

			/* did completion remove an interior q entry? */
			if (unlikely (q.ptr == 0 && !last))
				goto restart;
		}

		/* stop when we catch up to the HC */

		// FIXME:  this assumes we won't get lapped when
		// latencies climb; that should be rare, but...
		// detect it, and just go all the way around.
		// FLR might help detect this case, so long as latencies
		// don't exceed periodic_size msec (default 1.024 sec).

		// FIXME:  likewise assumes HC doesn't halt mid-scan

		if (frame == clock) {
			unsigned	now;

			if (!HCD_IS_RUNNING (ehci->hcd.state))
				break;
			ehci->next_uframe = now_uframe;
			now = readl (&ehci->regs->frame_index) % mod;
			if (now_uframe == now)
				break;

			/* rescan the rest of this frame, then ... */
			now_uframe = now;
			clock = now_uframe >> 3;
		} else
			frame = (frame + 1) % ehci->periodic_size;
	}
}
/* |
* Copyright (c) 2001-2002 by David Brownell |
* |
* This program is free software; you can redistribute it and/or modify it |
* under the terms of the GNU General Public License as published by the |
* Free Software Foundation; either version 2 of the License, or (at your |
* option) any later version. |
* |
* This program is distributed in the hope that it will be useful, but |
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY |
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
* for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software Foundation, |
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
*/ |
/* this file is part of ehci-hcd.c */ |
/*-------------------------------------------------------------------------*/ |
/* |
* EHCI scheduled transaction support: interrupt, iso, split iso |
* These are called "periodic" transactions in the EHCI spec. |
* |
* Note that for interrupt transfers, the QH/QTD manipulation is shared |
* with the "asynchronous" transaction support (control/bulk transfers). |
* The only real difference is in how interrupt transfers are scheduled. |
* We get some funky API restrictions from the current URB model, which |
* works notably better for reading transfers than for writing. (And |
* which accordingly needs to change before it'll work inside devices, |
* or with "USB On The Go" additions to USB 2.0 ...) |
*/ |
static int ehci_get_frame (struct usb_hcd *hcd); |
/*-------------------------------------------------------------------------*/ |
/* |
* periodic_next_shadow - return "next" pointer on shadow list |
* @periodic: host pointer to qh/itd/sitd |
* @tag: hardware tag for type of this record |
*/ |
static union ehci_shadow * |
periodic_next_shadow (union ehci_shadow *periodic, int tag) |
{ |
switch (tag) { |
case Q_TYPE_QH: |
return &periodic->qh->qh_next; |
case Q_TYPE_FSTN: |
return &periodic->fstn->fstn_next; |
case Q_TYPE_ITD: |
return &periodic->itd->itd_next; |
#ifdef have_split_iso |
case Q_TYPE_SITD: |
return &periodic->sitd->sitd_next; |
#endif /* have_split_iso */ |
} |
dbg ("BAD shadow %p tag %d", periodic->ptr, tag); |
// BUG (); |
return 0; |
} |
/* Unlink "ptr" (a qh/itd/fstn/sitd) from frame's periodic list, both
 * hardware and shadow; returns true after successful unlink, false if
 * the entry was no longer on that frame.
 * Caller must hold ehci->lock.
 */
static int periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow [frame];
	u32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	here = *prev_p;
	union ehci_shadow	*next_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
		/* NOTE(review): here.qh->hw_next is used for EVERY record
		 * type; this assumes hw_next sits at the same offset in
		 * qh, itd, fstn and sitd -- confirm against ehci.h.
		 */
		hw_p = &here.qh->hw_next;
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr) {
		dbg ("entry %p no longer on frame [%d]", ptr, frame);
		return 0;
	}
	// vdbg ("periodic unlink %p from frame %d", ptr, frame);

	/* update hardware list ... HC may still know the old structure, so
	 * don't change hw_next until it'll have purged its cache
	 */
	next_p = periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
	*hw_p = here.qh->hw_next;

	/* unlink from shadow list; HCD won't see old structure again */
	*prev_p = *next_p;
	next_p->ptr = 0;

	return 1;
}
/* how many of the uframe's 125 usecs are allocated? */ |
static unsigned short |
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe) |
{ |
u32 *hw_p = &ehci->periodic [frame]; |
union ehci_shadow *q = &ehci->pshadow [frame]; |
unsigned usecs = 0; |
while (q->ptr) { |
switch (Q_NEXT_TYPE (*hw_p)) { |
case Q_TYPE_QH: |
/* is it in the S-mask? */ |
if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe)) |
usecs += q->qh->usecs; |
/* ... or C-mask? */ |
if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe))) |
usecs += q->qh->c_usecs; |
q = &q->qh->qh_next; |
break; |
case Q_TYPE_FSTN: |
/* for "save place" FSTNs, count the relevant INTR |
* bandwidth from the previous frame |
*/ |
if (q->fstn->hw_prev != EHCI_LIST_END) { |
dbg ("not counting FSTN bandwidth yet ..."); |
} |
q = &q->fstn->fstn_next; |
break; |
case Q_TYPE_ITD: |
/* NOTE the "one uframe per itd" policy */ |
if (q->itd->hw_transaction [uframe] != 0) |
usecs += q->itd->usecs; |
q = &q->itd->itd_next; |
break; |
#ifdef have_split_iso |
case Q_TYPE_SITD: |
temp = q->sitd->hw_fullspeed_ep & |
__constant_cpu_to_le32 (1 << 31); |
// FIXME: this doesn't count data bytes right... |
/* is it in the S-mask? (count SPLIT, DATA) */ |
if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) { |
if (temp) |
usecs += HS_USECS (188); |
else |
usecs += HS_USECS (1); |
} |
/* ... C-mask? (count CSPLIT, DATA) */ |
if (q->sitd->hw_uframe & |
cpu_to_le32 (1 << (8 + uframe))) { |
if (temp) |
usecs += HS_USECS (0); |
else |
usecs += HS_USECS (188); |
} |
q = &q->sitd->sitd_next; |
break; |
#endif /* have_split_iso */ |
default: |
BUG (); |
} |
} |
#ifdef DEBUG |
if (usecs > 100) |
err ("overallocated uframe %d, periodic is %d usecs", |
frame * 8 + uframe, usecs); |
#endif |
return usecs; |
} |
/*-------------------------------------------------------------------------*/ |
/* Enable periodic schedule processing (set CMD_PSE), first waiting for
 * any prior disable to take effect at a frame boundary.  Returns 0, or
 * handshake()'s negative errno (the HCD is then marked halted).
 */
static int enable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	/* did clearing PSE take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake (&ehci->regs->status, STS_PSS, 0, 9 * 125);
	if (status != 0) {
		ehci->hcd.state = USB_STATE_HALT;
		return status;
	}

	cmd = readl (&ehci->regs->command) | CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* posted write ... PSS happens later */
	ehci->hcd.state = USB_STATE_RUNNING;

	/* make sure ehci_work scans these */
	ehci->next_uframe = readl (&ehci->regs->frame_index)
				% (ehci->periodic_size << 3);
	return 0;
}
static int disable_periodic (struct ehci_hcd *ehci) |
{ |
u32 cmd; |
int status; |
/* did setting PSE not take effect yet? |
* takes effect only at frame boundaries... |
*/ |
status = handshake (&ehci->regs->status, STS_PSS, STS_PSS, 9 * 125); |
if (status != 0) { |
ehci->hcd.state = USB_STATE_HALT; |
return status; |
} |
cmd = readl (&ehci->regs->command) & ~CMD_PSE; |
writel (cmd, &ehci->regs->command); |
/* posted write ... */ |
ehci->next_uframe = -1; |
return 0; |
} |
/*-------------------------------------------------------------------------*/ |
// FIXME microframe periods not yet handled

/* Remove an interrupt qh from every frame of the periodic schedule it
 * occupies, marking it idle.  @wait nonzero: spin 125 usecs when the
 * HC might still be caching the qh, making immediate reuse safe.
 * NOTE(review): callers appear to hold ehci->lock -- confirm.
 */
static void intr_deschedule (
	struct ehci_hcd	*ehci,
	struct ehci_qh	*qh,
	int		wait
) {
	int		status;
	unsigned	frame = qh->start;

	/* one unlink + refcount drop per frame the period touches */
	do {
		periodic_unlink (ehci, frame, qh);
		qh_put (ehci, qh);
		frame += qh->period;
	} while (frame < ehci->periodic_size);

	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = 0;
	ehci->periodic_sched--;

	/* maybe turn off periodic schedule; the status is only
	 * informational here (no recovery path)
	 */
	if (!ehci->periodic_sched)
		status = disable_periodic (ehci);
	else {
		status = 0;
		vdbg ("periodic schedule still enabled");
	}

	/*
	 * If the hc may be looking at this qh, then delay a uframe
	 * (yeech!) to be sure it's done.
	 * No other threads may be mucking with this qh.
	 */
	if (((ehci_get_frame (&ehci->hcd) - frame) % qh->period) == 0) {
		if (wait) {
			udelay (125);
			qh->hw_next = EHCI_LIST_END;
		} else {
			/* we may not be IDLE yet, but if the qh is empty
			 * the race is very short. then if qh also isn't
			 * rescheduled soon, it won't matter. otherwise...
			 */
			vdbg ("intr_deschedule...");
		}
	} else
		qh->hw_next = EHCI_LIST_END;

	qh->qh_state = QH_STATE_IDLE;

	/* update per-qh bandwidth utilization (for usbfs) */
	hcd_to_bus (&ehci->hcd)->bandwidth_allocated -=
		(qh->usecs + qh->c_usecs) / qh->period;

	dbg ("descheduled qh %p, period = %d frame = %d count = %d, urbs = %d",
		qh, qh->period, frame,
		atomic_read (&qh->refcount), ehci->periodic_sched);
}
/* Return nonzero when "usecs" more microseconds fit into slot
 * (frame, uframe) and into every slot "period" frames later, under
 * the 80% periodic budget (100 usecs per uframe); 0 otherwise.
 */
static int check_period (
	struct ehci_hcd	*ehci,
	unsigned	frame,
	unsigned	uframe,
	unsigned	period,
	unsigned	usecs
) {
	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/*
	 * 80% periodic == 100 usec/uframe available
	 * convert "usecs we need" to "max already claimed"
	 */
	usecs = 100 - usecs;

	do {
		int	claimed;

// FIXME delete when intr_submit handles non-empty queues
// this gives us a one intr/frame limit (vs N/uframe)
// ... and also lets us avoid tracking split transactions
// that might collide at a given TT/hub.
		if (ehci->pshadow [frame].ptr)
			return 0;

		claimed = periodic_usecs (ehci, frame, uframe);
		if (claimed > usecs)
			return 0;

// FIXME update to handle sub-frame periods
	} while ((frame += period) < ehci->periodic_size);

	// success!
	return 1;
}
/* See whether qh's interrupt transfer -- and, for splits, its CSPLIT
 * companion -- fits starting at slot (frame, uframe).  On success
 * stores the CSPLIT mask through c_maskp and returns 0; otherwise
 * returns -ENOSPC.
 */
static int check_intr_schedule (
	struct ehci_hcd		*ehci,
	unsigned		frame,
	unsigned		uframe,
	const struct ehci_qh	*qh,
	u32			*c_maskp
)
{
	int	retval = -ENOSPC;

	if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
		goto done;

	/* no c_usecs: high speed endpoint, no CSPLIT to place */
	if (!qh->c_usecs) {
		retval = 0;
		*c_maskp = cpu_to_le32 (0);
		goto done;
	}

	/* This is a split transaction; check the bandwidth available for
	 * the completion too.  Check both worst and best case gaps: worst
	 * case is SPLIT near uframe end, and CSPLIT near start ... best is
	 * vice versa.  Difference can be almost two uframe times, but we
	 * reserve unnecessary bandwidth (waste it) this way.  (Actually
	 * even better cases exist, like immediate device NAK.)
	 *
	 * FIXME don't even bother unless we know this TT is idle in that
	 * range of uframes ... for now, check_period() allows only one
	 * interrupt transfer per frame, so needn't check "TT busy" status
	 * when scheduling a split (QH, SITD, or FSTN).
	 *
	 * FIXME ehci 0.96 and above can use FSTNs
	 */
	if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
			qh->period, qh->c_usecs))
		goto done;
	if (!check_period (ehci, frame, uframe + qh->gap_uf,
			qh->period, qh->c_usecs))
		goto done;

	*c_maskp = cpu_to_le32 (0x03 << (8 + uframe + qh->gap_uf));
	retval = 0;
done:
	return retval;
}
/* Give an interrupt qh a slot in the periodic schedule, preferring the
 * slot it used before (qh->start) when that still has bandwidth.  On
 * success the qh is linked into every affected frame of both the
 * software shadow (pshadow) and hardware (periodic) schedules, and is
 * marked QH_STATE_LINKED.  Returns 0 or -ENOSPC.
 * NOTE(review): caller appears to hold ehci->lock (see intr_submit);
 * confirm for any other callers.
 */
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		status;
	unsigned	uframe;
	u32		c_mask;
	unsigned	frame;		/* 0..(qh->period - 1), or NO_FRAME */

	qh->hw_next = EHCI_LIST_END;
	frame = qh->start;

	/* reuse the previous schedule slots, if we can */
	if (frame < qh->period) {
		/* recover the uframe from the low byte (S-mask) of
		 * hw_info2; ffs() is 1-based, hence the --uframe
		 */
		uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff);
		status = check_intr_schedule (ehci, frame, --uframe,
				qh, &c_mask);
	} else {
		uframe = 0;
		c_mask = 0;
		status = -ENOSPC;
	}

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	if (status) {
		/* scan frames from high to low */
		frame = qh->period - 1;
		do {
			for (uframe = 0; uframe < 8; uframe++) {
				status = check_intr_schedule (ehci,
						frame, uframe, qh,
						&c_mask);
				if (status == 0)
					break;
			}
		} while (status && frame--);
		if (status)
			goto done;
		qh->start = frame;

		/* reset S-frame and (maybe) C-frame masks */
		qh->hw_info2 &= ~0xffff;
		qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
	} else
		dbg ("reused previous qh %p schedule", qh);

	/* stuff into the periodic schedule */
	qh->qh_state = QH_STATE_LINKED;
	dbg ("scheduled qh %p usecs %d/%d period %d.0 starting %d.%d (gap %d)",
		qh, qh->usecs, qh->c_usecs,
		qh->period, frame, uframe, qh->gap_uf);
	/* link into every frame this qh's period touches */
	do {
		if (unlikely (ehci->pshadow [frame].ptr != 0)) {

// FIXME -- just link toward the end, before any qh with a shorter period,
// AND accommodate it already having been linked here (after some other qh)
// AS WELL AS updating the schedule checking logic

			BUG ();
		} else {
			/* shadow first, then make it visible to the HC */
			ehci->pshadow [frame].qh = qh_get (qh);
			ehci->periodic [frame] =
				QH_NEXT (qh->qh_dma);
		}
		wmb ();
		frame += qh->period;
	} while (frame < ehci->periodic_size);

	/* update per-qh bandwidth for usbfs */
	hcd_to_bus (&ehci->hcd)->bandwidth_allocated +=
		(qh->usecs + qh->c_usecs) / qh->period;

	/* maybe enable periodic schedule processing */
	if (!ehci->periodic_sched++)
		status = enable_periodic (ehci);
done:
	return status;
}
/* Submit an interrupt URB: find (or create and schedule) the endpoint's
 * qh, then append the urb's qtds to it.  Returns 0 or a negative errno;
 * on error the caller's qtd_list is freed here before returning.
 */
static int intr_submit (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			mem_flags	/* unused in this path */
) {
	unsigned	epnum;
	unsigned long	flags;
	struct ehci_qh	*qh;
	struct hcd_dev	*dev;
	int		is_input;
	int		status = 0;
	struct list_head empty;

	/* get endpoint and transfer/schedule data */
	epnum = usb_pipeendpoint (urb->pipe);
	is_input = usb_pipein (urb->pipe);
	if (is_input)
		epnum |= 0x10;	/* distinguish IN from OUT in dev->ep [] */

	spin_lock_irqsave (&ehci->lock, flags);
	dev = (struct hcd_dev *)urb->dev->hcpriv;

	/* get qh and force any scheduling errors: append an EMPTY list
	 * first, so scheduling failures happen before any qtd is queued
	 */
	INIT_LIST_HEAD (&empty);
	qh = qh_append_tds (ehci, urb, &empty, epnum, &dev->ep [epnum]);
	if (qh == 0) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		if ((status = qh_schedule (ehci, qh)) != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
	/* presumably can't fail, since the empty append above succeeded */
	BUG_ON (qh == 0);

	/* ... update usbfs periodic stats */
	hcd_to_bus (&ehci->hcd)->bandwidth_int_reqs++;

done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (status)
		qtd_list_free (ehci, urb, qtd_list);

	return status;
}
static unsigned |
intr_complete ( |
struct ehci_hcd *ehci, |
unsigned frame, |
struct ehci_qh *qh, |
struct pt_regs *regs |
) { |
unsigned count; |
/* nothing to report? */ |
if (likely ((qh->hw_token & __constant_cpu_to_le32 (QTD_STS_ACTIVE)) |
!= 0)) |
return 0; |
if (unlikely (list_empty (&qh->qtd_list))) { |
dbg ("intr qh %p no TDs?", qh); |
return 0; |
} |
/* handle any completions */ |
count = qh_completions (ehci, qh, regs); |
if (unlikely (list_empty (&qh->qtd_list))) |
intr_deschedule (ehci, qh, 0); |
return count; |
} |
/*-------------------------------------------------------------------------*/ |
static void |
itd_free_list (struct ehci_hcd *ehci, struct urb *urb) |
{ |
struct ehci_itd *first_itd = urb->hcpriv; |
while (!list_empty (&first_itd->itd_list)) { |
struct ehci_itd *itd; |
itd = list_entry ( |
first_itd->itd_list.next, |
struct ehci_itd, itd_list); |
list_del (&itd->itd_list); |
pci_pool_free (ehci->itd_pool, itd, itd->itd_dma); |
} |
pci_pool_free (ehci->itd_pool, first_itd, first_itd->itd_dma); |
urb->hcpriv = 0; |
} |
static int |
itd_fill ( |
struct ehci_hcd *ehci, |
struct ehci_itd *itd, |
struct urb *urb, |
unsigned index, // urb->iso_frame_desc [index] |
dma_addr_t dma // mapped transfer buffer |
) { |
u64 temp; |
u32 buf1; |
unsigned i, epnum, maxp, multi; |
unsigned length; |
int is_input; |
itd->hw_next = EHCI_LIST_END; |
itd->urb = urb; |
itd->index = index; |
/* tell itd about its transfer buffer, max 2 pages */ |
length = urb->iso_frame_desc [index].length; |
dma += urb->iso_frame_desc [index].offset; |
temp = dma & ~0x0fff; |
for (i = 0; i < 2; i++) { |
itd->hw_bufp [i] = cpu_to_le32 ((u32) temp); |
itd->hw_bufp_hi [i] = cpu_to_le32 ((u32)(temp >> 32)); |
temp += 0x1000; |
} |
itd->buf_dma = dma; |
/* |
* this might be a "high bandwidth" highspeed endpoint, |
* as encoded in the ep descriptor's maxpacket field |
*/ |
epnum = usb_pipeendpoint (urb->pipe); |
is_input = usb_pipein (urb->pipe); |
if (is_input) { |
maxp = urb->dev->epmaxpacketin [epnum]; |
buf1 = (1 << 11); |
} else { |
maxp = urb->dev->epmaxpacketout [epnum]; |
buf1 = 0; |
} |
buf1 |= (maxp & 0x03ff); |
multi = 1; |
multi += (maxp >> 11) & 0x03; |
maxp &= 0x03ff; |
maxp *= multi; |
/* transfer can't fit in any uframe? */ |
if (length < 0 || maxp < length) { |
dbg ("BAD iso packet: %d bytes, max %d, urb %p [%d] (of %d)", |
length, maxp, urb, index, |
urb->iso_frame_desc [index].length); |
return -ENOSPC; |
} |
itd->usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 1, length); |
/* "plus" info in low order bits of buffer pointers */ |
itd->hw_bufp [0] |= cpu_to_le32 ((epnum << 8) | urb->dev->devnum); |
itd->hw_bufp [1] |= cpu_to_le32 (buf1); |
itd->hw_bufp [2] |= cpu_to_le32 (multi); |
/* figure hw_transaction[] value (it's scheduled later) */ |
itd->transaction = EHCI_ISOC_ACTIVE; |
itd->transaction |= dma & 0x0fff; /* offset; buffer=0 */ |
if ((index + 1) == urb->number_of_packets) |
itd->transaction |= EHCI_ITD_IOC; /* end-of-urb irq */ |
itd->transaction |= length << 16; |
cpu_to_le32s (&itd->transaction); |
return 0; |
} |
static int |
itd_urb_transaction ( |
struct ehci_hcd *ehci, |
struct urb *urb, |
int mem_flags |
) { |
int frame_index; |
struct ehci_itd *first_itd, *itd; |
int status; |
dma_addr_t itd_dma; |
/* allocate/init ITDs */ |
for (frame_index = 0, first_itd = 0; |
frame_index < urb->number_of_packets; |
frame_index++) { |
itd = pci_pool_alloc_usb (ehci->itd_pool, mem_flags, &itd_dma); |
if (!itd) { |
status = -ENOMEM; |
goto fail; |
} |
memset (itd, 0, sizeof *itd); |
itd->itd_dma = itd_dma; |
status = itd_fill (ehci, itd, urb, frame_index, |
urb->transfer_dma); |
if (status != 0) |
goto fail; |
if (first_itd) |
list_add_tail (&itd->itd_list, |
&first_itd->itd_list); |
else { |
INIT_LIST_HEAD (&itd->itd_list); |
urb->hcpriv = first_itd = itd; |
} |
} |
urb->error_count = 0; |
return 0; |
fail: |
if (urb->hcpriv) |
itd_free_list (ehci, urb); |
return status; |
} |
/*-------------------------------------------------------------------------*/ |
/* Prepend an itd to one frame of the periodic schedule (both the
 * software shadow and the hardware list).  The itd's own forward
 * links are filled in first, so the HC never sees a half-linked
 * entry; keep this store ordering.
 */
static inline void
itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
	/* always prepend ITD/SITD ... only QH tree is order-sensitive */
	itd->itd_next = ehci->pshadow [frame];
	itd->hw_next = ehci->periodic [frame];
	/* now make it visible: shadow, then hardware */
	ehci->pshadow [frame].itd = itd;
	ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD;
}
/* |
* return zero on success, else -errno |
* - start holds first uframe to start scheduling into |
* - max is the first uframe it's NOT (!) OK to start scheduling into |
* math to be done modulo "mod" (ehci->periodic_size << 3) |
*/ |
static int get_iso_range ( |
struct ehci_hcd *ehci, |
struct urb *urb, |
unsigned *start, |
unsigned *max, |
unsigned mod |
) { |
struct list_head *lh; |
struct hcd_dev *dev = urb->dev->hcpriv; |
int last = -1; |
unsigned now, span, end; |
span = urb->interval * urb->number_of_packets; |
/* first see if we know when the next transfer SHOULD happen */ |
list_for_each (lh, &dev->urb_list) { |
struct urb *u; |
struct ehci_itd *itd; |
unsigned s; |
u = list_entry (lh, struct urb, urb_list); |
if (u == urb || u->pipe != urb->pipe) |
continue; |
if (u->interval != urb->interval) { /* must not change! */ |
dbg ("urb %p interval %d ... != %p interval %d", |
u, u->interval, urb, urb->interval); |
return -EINVAL; |
} |
/* URB for this endpoint... covers through when? */ |
itd = urb->hcpriv; |
s = itd->uframe + u->interval * u->number_of_packets; |
if (last < 0) |
last = s; |
else { |
/* |
* So far we can only queue two ISO URBs... |
* |
* FIXME do interval math, figure out whether |
* this URB is "before" or not ... also, handle |
* the case where the URB might have completed, |
* but hasn't yet been processed. |
*/ |
dbg ("NYET: queue >2 URBs per ISO endpoint"); |
return -EDOM; |
} |
} |
/* calculate the legal range [start,max) */ |
now = readl (&ehci->regs->frame_index) + 1; /* next uframe */ |
if (!ehci->periodic_sched) |
now += 8; /* startup delay */ |
now %= mod; |
end = now + mod; |
if (last < 0) { |
*start = now + ehci->i_thresh + /* paranoia */ 1; |
*max = end - span; |
if (*max < *start + 1) |
*max = *start + 1; |
} else { |
*start = last % mod; |
*max = (last + 1) % mod; |
} |
/* explicit start frame? */ |
if (!(urb->transfer_flags & URB_ISO_ASAP)) { |
unsigned temp; |
/* sanity check: must be in range */ |
urb->start_frame %= ehci->periodic_size; |
temp = urb->start_frame << 3; |
if (temp < *start) |
temp += mod; |
if (temp > *max) |
return -EDOM; |
/* use that explicit start frame */ |
*start = urb->start_frame << 3; |
temp += 8; |
if (temp < *max) |
*max = temp; |
} |
// FIXME minimize wraparound to "now" ... insist max+span |
// (and start+span) remains a few frames short of "end" |
*max %= ehci->periodic_size; |
if ((*start + span) < end) |
return 0; |
return -EFBIG; |
} |
static int |
itd_schedule (struct ehci_hcd *ehci, struct urb *urb) |
{ |
unsigned start, max, i; |
int status; |
unsigned mod = ehci->periodic_size << 3; |
for (i = 0; i < urb->number_of_packets; i++) { |
urb->iso_frame_desc [i].status = -EINPROGRESS; |
urb->iso_frame_desc [i].actual_length = 0; |
} |
if ((status = get_iso_range (ehci, urb, &start, &max, mod)) != 0) |
return status; |
do { |
unsigned uframe; |
unsigned usecs; |
struct ehci_itd *itd; |
/* check schedule: enough space? */ |
itd = urb->hcpriv; |
uframe = start; |
for (i = 0, uframe = start; |
i < urb->number_of_packets; |
i++, uframe += urb->interval) { |
uframe %= mod; |
/* can't commit more than 80% periodic == 100 usec */ |
if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7) |
> (100 - itd->usecs)) { |
itd = 0; |
break; |
} |
itd = list_entry (itd->itd_list.next, |
struct ehci_itd, itd_list); |
} |
if (!itd) |
continue; |
/* that's where we'll schedule this! */ |
itd = urb->hcpriv; |
urb->start_frame = start >> 3; |
vdbg ("ISO urb %p (%d packets period %d) starting %d.%d", |
urb, urb->number_of_packets, urb->interval, |
urb->start_frame, start & 0x7); |
for (i = 0, uframe = start, usecs = 0; |
i < urb->number_of_packets; |
i++, uframe += urb->interval) { |
uframe %= mod; |
itd->uframe = uframe; |
itd->hw_transaction [uframe & 0x07] = itd->transaction; |
itd_link (ehci, (uframe >> 3) % ehci->periodic_size, |
itd); |
wmb (); |
usecs += itd->usecs; |
itd = list_entry (itd->itd_list.next, |
struct ehci_itd, itd_list); |
} |
/* update bandwidth utilization records (for usbfs) |
* |
* FIXME This claims each URB queued to an endpoint, as if |
* transfers were concurrent, not sequential. So bandwidth |
* typically gets double-billed ... comes from tying it to |
* URBs rather than endpoints in the schedule. Luckily we |
* don't use this usbfs data for serious decision making. |
*/ |
usecs /= urb->number_of_packets; |
usecs /= urb->interval; |
usecs >>= 3; |
if (usecs < 1) |
usecs = 1; |
usb_claim_bandwidth (urb->dev, urb, usecs, 1); |
/* maybe enable periodic schedule processing */ |
if (!ehci->periodic_sched++) { |
if ((status = enable_periodic (ehci)) != 0) { |
// FIXME deschedule right away |
err ("itd_schedule, enable = %d", status); |
} |
} |
return 0; |
} while ((start = ++start % mod) != max); |
/* no room in the schedule */ |
dbg ("urb %p, CAN'T SCHEDULE", urb); |
return -ENOSPC; |
} |
/*-------------------------------------------------------------------------*/ |
#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR) |
/* Record per-packet status/length for one uframe of a completed itd;
 * when this itd covers the urb's last packet, free the itd list and
 * give the urb back to its driver.  Returns nonzero iff the urb was
 * given back.  Called with ehci->lock held; the lock is dropped and
 * retaken around the completion callback.
 */
static unsigned
itd_complete (
	struct ehci_hcd	*ehci,
	struct ehci_itd	*itd,
	unsigned	uframe,
	struct pt_regs	*regs
) {
	struct urb				*urb = itd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;

	/* update status for this uframe's transfers */
	desc = &urb->iso_frame_desc [itd->index];

	t = itd->hw_transaction [uframe];
	itd->hw_transaction [uframe] = 0;	/* consume this slot */
	if (t & EHCI_ISOC_ACTIVE)
		desc->status = -EXDEV;	/* still active: HC never ran it */
	else if (t & ISO_ERRS) {
		urb->error_count++;
		if (t & EHCI_ISOC_BUF_ERR)
			desc->status = usb_pipein (urb->pipe)
				? -ENOSR	/* couldn't read */
				: -ECOMM;	/* couldn't write */
		else if (t & EHCI_ISOC_BABBLE)
			desc->status = -EOVERFLOW;
		else /* (t & EHCI_ISOC_XACTERR) */
			desc->status = -EPROTO;

		/* HC need not update length with this error */
		if (!(t & EHCI_ISOC_BABBLE))
			desc->actual_length += EHCI_ITD_LENGTH (t);
	} else {
		desc->status = 0;
		desc->actual_length += EHCI_ITD_LENGTH (t);
	}

	vdbg ("itd %p urb %p packet %d/%d trans %x status %d len %d",
		itd, urb, itd->index + 1, urb->number_of_packets,
		t, desc->status, desc->actual_length);

	/* handle completion now?  only after the last packet's itd */
	if ((itd->index + 1) != urb->number_of_packets)
		return 0;

	/*
	 * Always give the urb back to the driver ... expect it to submit
	 * a new urb (or resubmit this), and to have another already queued
	 * when un-interrupted transfers are needed.
	 *
	 * NOTE that for now we don't accelerate ISO unlinks; they just
	 * happen according to the current schedule.  Means a delay of
	 * up to about a second (max).
	 */
	itd_free_list (ehci, urb);
	if (urb->status == -EINPROGRESS)
		urb->status = 0;

	/* complete() can reenter this HCD */
	spin_unlock (&ehci->lock);
	usb_hcd_giveback_urb (&ehci->hcd, urb, regs);
	spin_lock (&ehci->lock);

	/* defer stopping schedule; completion can submit */
	ehci->periodic_sched--;
	if (!ehci->periodic_sched)
		(void) disable_periodic (ehci);

	return 1;
}
/*-------------------------------------------------------------------------*/ |
static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags) |
{ |
int status; |
unsigned long flags; |
dbg ("itd_submit urb %p", urb); |
/* allocate ITDs w/o locking anything */ |
status = itd_urb_transaction (ehci, urb, mem_flags); |
if (status < 0) |
return status; |
/* schedule ... need to lock */ |
spin_lock_irqsave (&ehci->lock, flags); |
status = itd_schedule (ehci, urb); |
spin_unlock_irqrestore (&ehci->lock, flags); |
if (status < 0) |
itd_free_list (ehci, urb); |
return status; |
} |
#ifdef have_split_iso |
/*-------------------------------------------------------------------------*/ |
/* |
* "Split ISO TDs" ... used for USB 1.1 devices going through |
* the TTs in USB 2.0 hubs. |
* |
* FIXME not yet implemented |
*/ |
#endif /* have_split_iso */ |
/*-------------------------------------------------------------------------*/ |
/* Walk the periodic schedule from the last scan point up to the HC's
 * current frame index (or over everything left, when the HC is not
 * running), reporting interrupt-qh completions and unlinking finished
 * ITDs.  NOTE(review): appears to run with ehci->lock held, since the
 * itd_complete() it calls drops/retakes that lock -- confirm callers.
 */
static void
scan_periodic (struct ehci_hcd *ehci, struct pt_regs *regs)
{
	unsigned	frame, clock, now_uframe, mod;
	unsigned	count = 0;

	mod = ehci->periodic_size << 3;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 * Don't scan ISO entries more than once, though.
	 */
	frame = ehci->next_uframe >> 3;
	if (HCD_IS_RUNNING (ehci->hcd.state))
		now_uframe = readl (&ehci->regs->frame_index);
	else
		/* halted: pick an endpoint just "before" frame, so the
		 * loop below covers the whole schedule exactly once
		 */
		now_uframe = (frame << 3) - 1;
	now_uframe %= mod;
	clock = now_uframe >> 3;

	for (;;) {
		union ehci_shadow	q, *q_p;
		u32			type, *hw_p;
		unsigned		uframes;

restart:
		/* scan schedule to _before_ current frame index */
		if (frame == clock)
			uframes = now_uframe & 0x07;
		else
			uframes = 8;

		q_p = &ehci->pshadow [frame];
		hw_p = &ehci->periodic [frame];
		q.ptr = q_p->ptr;
		type = Q_NEXT_TYPE (*hw_p);

		/* scan each element in frame's queue for completions */
		while (q.ptr != 0) {
			int			last;
			unsigned		uf;
			union ehci_shadow	temp;

			switch (type) {
			case Q_TYPE_QH:
				/* record "was tail?" BEFORE completion
				 * handling can unlink this qh
				 */
				last = (q.qh->hw_next == EHCI_LIST_END);
				temp = q.qh->qh_next;
				type = Q_NEXT_TYPE (q.qh->hw_next);
				/* hold a reference across the callback */
				count += intr_complete (ehci, frame,
						qh_get (q.qh), regs);
				qh_put (ehci, q.qh);
				q = temp;
				break;
			case Q_TYPE_FSTN:
				last = (q.fstn->hw_next == EHCI_LIST_END);
				/* for "save place" FSTNs, look at QH entries
				 * in the previous frame for completions.
				 */
				if (q.fstn->hw_prev != EHCI_LIST_END) {
					dbg ("ignoring completions from FSTNs");
				}
				type = Q_NEXT_TYPE (q.fstn->hw_next);
				q = q.fstn->fstn_next;
				break;
			case Q_TYPE_ITD:
				last = (q.itd->hw_next == EHCI_LIST_END);

				/* Unlink each (S)ITD we see, since the ISO
				 * URB model forces constant rescheduling.
				 * That complicates sharing uframes in ITDs,
				 * and means we need to skip uframes the HC
				 * hasn't yet processed.
				 */
				for (uf = 0; uf < uframes; uf++) {
					if (q.itd->hw_transaction [uf] != 0) {
						/* unlink from shadow and
						 * hardware lists first
						 */
						temp = q;
						*q_p = q.itd->itd_next;
						*hw_p = q.itd->hw_next;
						type = Q_NEXT_TYPE (*hw_p);

						/* might free q.itd ... */
						count += itd_complete (ehci,
							temp.itd, uf, regs);
						break;
					}
				}
				/* we might skip this ITD's uframe ...
				 * leave it linked and just advance past it
				 */
				if (uf == uframes) {
					q_p = &q.itd->itd_next;
					hw_p = &q.itd->hw_next;
					type = Q_NEXT_TYPE (q.itd->hw_next);
				}

				q = *q_p;
				break;
#ifdef have_split_iso
			case Q_TYPE_SITD:
				last = (q.sitd->hw_next == EHCI_LIST_END);
				sitd_complete (ehci, q.sitd);
				type = Q_NEXT_TYPE (q.sitd->hw_next);

				// FIXME unlink SITD after split completes
				q = q.sitd->sitd_next;
				break;
#endif /* have_split_iso */
			default:
				dbg ("corrupt type %d frame %d shadow %p",
					type, frame, q.ptr);
				// BUG ();
				last = 1;
				q.ptr = 0;
			}

			/* did completion remove an interior q entry? */
			if (unlikely (q.ptr == 0 && !last))
				goto restart;
		}

		/* stop when we catch up to the HC */

		// FIXME:  this assumes we won't get lapped when
		// latencies climb; that should be rare, but...
		// detect it, and just go all the way around.
		// FLR might help detect this case, so long as latencies
		// don't exceed periodic_size msec (default 1.024 sec).

		// FIXME:  likewise assumes HC doesn't halt mid-scan

		if (frame == clock) {
			unsigned	now;

			if (!HCD_IS_RUNNING (ehci->hcd.state))
				break;
			ehci->next_uframe = now_uframe;
			now = readl (&ehci->regs->frame_index) % mod;
			if (now_uframe == now)
				break;

			/* rescan the rest of this frame, then ... */
			now_uframe = now;
			clock = now_uframe >> 3;
		} else
			frame = (frame + 1) % ehci->periodic_size;
	}
}