/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Johannes Erdfelt <johannes@erdfelt.com>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 *
 * Intel documents this fairly well, and as far as I know there
 * are no royalties or anything like that, but even so there are
 * people who decided that they want to do the same thing in a
 * completely different way.
 *
 * WARNING! The USB documentation is downright evil. Most of it
 * is just crap, written by a committee. You're better off ignoring
 * most of it, the important stuff is:
 *  - the low-level protocol (fairly simple but lots of small details)
 *  - working around the horridness of the rest
 */
28 | |||
29 | #include <linuxcomp.h> |
||
30 | |||
31 | #include <linux/config.h> |
||
32 | #include <linux/module.h> |
||
33 | #include <linux/pci.h> |
||
34 | #include <linux/kernel.h> |
||
35 | #include <linux/init.h> |
||
36 | #include <linux/delay.h> |
||
37 | #include <linux/ioport.h> |
||
38 | #include <linux/sched.h> |
||
39 | #include <linux/slab.h> |
||
40 | #include <linux/smp_lock.h> |
||
41 | #include <linux/errno.h> |
||
42 | #include <linux/unistd.h> |
||
43 | #include <linux/interrupt.h> |
||
44 | #include <linux/spinlock.h> |
||
45 | #include <linux/proc_fs.h> |
||
46 | #ifdef CONFIG_USB_DEBUG |
||
47 | #define DEBUG |
||
48 | #else |
||
49 | #undef DEBUG |
||
50 | #endif |
||
51 | #include <linux/usb.h> |
||
52 | |||
53 | #include <asm/uaccess.h> |
||
54 | #include <asm/io.h> |
||
55 | #include <asm/irq.h> |
||
56 | #include <asm/system.h> |
||
57 | |||
58 | #include "../core/hcd.h" |
||
59 | #include "uhci-hcd.h" |
||
60 | |||
61 | #include <linux/pm.h> |
||
62 | |||
/*
 * Version Information
 */
#define DRIVER_VERSION "v2.1"
#define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber"
#define DRIVER_DESC "USB Universal Host Controller Interface driver"

/*
 * debug = 0, no debugging messages
 * debug = 1, dump failed URB's except for stalls
 * debug = 2, dump all failed URB's (including stalls)
 *            show all queues in /proc/driver/uhci/[pci_addr]
 * debug = 3, show all TD's in URB's when dumping
 */
#ifdef DEBUG
static int debug = 3;
#else
static int debug = 0;
#endif
MODULE_PARM(debug, "i");
MODULE_PARM_DESC(debug, "Debug level");
static char *errbuf;
#define ERRBUF_LEN	(PAGE_SIZE * 8)
#include "uhci-hub.c"
#include "uhci-debug.c"

static kmem_cache_t *uhci_up_cachep;	/* urb_priv */

static int uhci_get_current_frame_number(struct uhci_hcd *uhci);
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);

static void hc_state_transitions(struct uhci_hcd *uhci);

/* If a transfer is still active after this much time, turn off FSBR */
#define IDLE_TIMEOUT	(HZ / 20)	/* 50 ms */
#define FSBR_DELAY	(HZ / 20)	/* 50 ms */

/* When we timeout an idle transfer for FSBR, we'll switch it over to */
/* depth first traversal. We'll do it in groups of this number of TD's */
/* to make sure it doesn't hog all of the bandwidth */
#define DEPTH_INTERVAL 5
/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort
 */
static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	unsigned long flags;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	unsigned long flags;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}

static inline void uhci_add_complete(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	unsigned long flags;

	spin_lock_irqsave(&uhci->complete_list_lock, flags);
	list_add_tail(&urbp->complete_list, &uhci->complete_list);
	spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
}
static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci, struct usb_device *dev)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = pci_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;

	td->link = UHCI_PTR_TERM;
	td->buffer = 0;

	td->frame = -1;
	td->dev = dev;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->remove_list);
	INIT_LIST_HEAD(&td->fl_list);

	usb_get_dev(dev);

	return td;
}

static inline void uhci_fill_td(struct uhci_td *td, __u32 status,
		__u32 token, __u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}
/*
 * We insert Isochronous URB's directly into the frame list at the beginning
 */
static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
{
	unsigned long flags;

	framenum %= UHCI_NUMFRAMES;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->fl->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->fl->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		mb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		td->link = uhci->fl->frame[framenum];
		mb();
		uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->fl->frame_cpu[framenum] = td;
	}

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	unsigned long flags;

	/* If it's not inserted, don't remove it */
	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	if (td->frame == -1 && list_empty(&td->fl_list))
		goto out;

	if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->fl->frame[td->frame] = td->link;
			uhci->fl->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->fl->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	mb();
	td->link = UHCI_PTR_TERM;

	list_del_init(&td->fl_list);
	td->frame = -1;

out:
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
 * Insert an URB's chain of TDs into the QH, starting at the top.
 */
static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, u32 breadth)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td, *ptd;

	if (list_empty(&urbp->td_list))
		return;

	head = &urbp->td_list;
	tmp = head->next;

	/* Ordering isn't important here yet since the QH hasn't been */
	/* inserted into the schedule yet */
	td = list_entry(tmp, struct uhci_td, list);

	/* Add the first TD to the QH element pointer */
	qh->element = cpu_to_le32(td->dma_handle) | breadth;

	ptd = td;

	/* Then link the rest of the TD's */
	tmp = tmp->next;
	while (tmp != head) {
		td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		ptd->link = cpu_to_le32(td->dma_handle) | breadth;

		ptd = td;
	}

	ptd->link = UHCI_PTR_TERM;
}
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dbg("td %p is still in list!", td);
	if (!list_empty(&td->remove_list))
		dbg("td %p still in remove_list!", td);
	if (!list_empty(&td->fl_list))
		dbg("td %p is still in fl_list!", td);

	if (td->dev)
		usb_put_dev(td->dev);

	pci_pool_free(uhci->td_pool, td, td->dma_handle);
}
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *dev)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = pci_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	qh->dev = dev;
	qh->urbp = NULL;

	INIT_LIST_HEAD(&qh->list);
	INIT_LIST_HEAD(&qh->remove_list);

	usb_get_dev(dev);

	return qh;
}

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (!list_empty(&qh->list))
		dbg("qh %p list not empty!", qh);
	if (!list_empty(&qh->remove_list))
		dbg("qh %p still in remove_list!", qh);

	if (qh->dev)
		usb_put_dev(qh->dev);

	pci_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
/*
 * Append this urb's qh after the last qh in skelqh->list
 * MUST be called with uhci->frame_list_lock acquired
 *
 * Note that urb_priv.queue_list doesn't have a separate queue head;
 * it's a ring with every element "live".
 */
static void _uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct list_head *tmp;
	struct uhci_qh *lqh;

	/* Grab the last QH */
	lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);

	/*
	 * Patch this endpoint's URB's QHs to point to the next skelqh:
	 *    skelqh --> ... lqh --> newqh --> next skelqh
	 * Do this first, so the HC always sees the right QH after this one.
	 */
	list_for_each (tmp, &urbp->queue_list) {
		struct urb_priv *turbp =
			list_entry(tmp, struct urb_priv, queue_list);

		turbp->qh->link = lqh->link;
	}
	urbp->qh->link = lqh->link;
	wmb();				/* Ordering is important */

	/*
	 * Patch QHs for previous endpoint's queued URBs?  HC goes
	 * here next, not to the next skelqh it now points to.
	 *
	 *    lqh --> td ... --> qh ... --> td --> qh ... --> td
	 *     |                 |                 |
	 *     v                 v                 v
	 *     +<----------------+-----------------+
	 *     v
	 *    newqh --> td ... --> td
	 *     |
	 *     v
	 *    ...
	 *
	 * The HC could see (and use!) any of these as we write them.
	 */
	if (lqh->urbp) {
		list_for_each (tmp, &lqh->urbp->queue_list) {
			struct urb_priv *turbp =
				list_entry(tmp, struct urb_priv, queue_list);

			turbp->qh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
		}
	}
	lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

	list_add_tail(&urbp->qh->list, &skelqh->list);
}
static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
	unsigned long flags;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	_uhci_insert_qh(uhci, skelqh, urb);
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
 * Start removal of QH from schedule; it finishes next frame.
 * TDs should be unlinked before this is called.
 */
static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	unsigned long flags;
	struct uhci_qh *pqh;

	if (!qh)
		return;

	qh->urbp = NULL;

	/*
	 * Only go through the hoops if it's actually linked in.
	 * Queued QHs are removed in uhci_delete_queued_urb,
	 * since (for queued URBs) the pqh's link points to the next
	 * QH in the queue, not the next endpoint's QH.
	 */
	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	if (!list_empty(&qh->list)) {
		pqh = list_entry(qh->list.prev, struct uhci_qh, list);

		if (pqh->urbp) {
			struct list_head *head, *tmp;

			head = &pqh->urbp->queue_list;
			tmp = head->next;
			while (head != tmp) {
				struct urb_priv *turbp =
					list_entry(tmp, struct urb_priv, queue_list);

				tmp = tmp->next;

				turbp->qh->link = qh->link;
			}
		}

		pqh->link = qh->link;
		mb();
		/* Leave qh->link in case the HC is on the QH now, it will */
		/* continue the rest of the schedule */
		qh->element = UHCI_PTR_TERM;

		list_del_init(&qh->list);
	}
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);

	spin_lock_irqsave(&uhci->qh_remove_list_lock, flags);

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the QH */
	if (list_empty(&uhci->qh_remove_list))
		uhci_set_next_interrupt(uhci);

	list_add(&qh->remove_list, &uhci->qh_remove_list);

	spin_unlock_irqrestore(&uhci->qh_remove_list_lock, flags);
}
static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct list_head *head, *tmp;

	head = &urbp->td_list;
	tmp = head->next;
	while (head != tmp) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		if (toggle)
			td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
		else
			td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);

		toggle ^= 1;
	}

	return toggle;
}
/* This function will append one URB's QH to another URB's QH. This is for */
/* queuing interrupt, control or bulk transfers */
static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
{
	struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
	struct list_head *tmp;
	struct uhci_td *lltd;
	unsigned long flags;

	eurbp = eurb->hcpriv;
	urbp = urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	/* Find the first URB in the queue */
	if (eurbp->queued) {
		struct list_head *head = &eurbp->queue_list;

		tmp = head->next;
		while (tmp != head) {
			struct urb_priv *turbp =
				list_entry(tmp, struct urb_priv, queue_list);

			if (!turbp->queued)
				break;

			tmp = tmp->next;
		}
	} else
		tmp = &eurbp->queue_list;

	furbp = list_entry(tmp, struct urb_priv, queue_list);
	lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);

	lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe),
		uhci_fixup_toggle(urb, uhci_toggle(td_token(lltd)) ^ 1));

	/* All qh's in the queue need to link to the next queue */
	urbp->qh->link = eurbp->qh->link;

	mb();			/* Make sure we flush everything */

	lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

	list_add_tail(&urbp->queue_list, &furbp->queue_list);

	urbp->queued = 1;

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp, *nurbp;
	struct list_head *head, *tmp;
	struct urb_priv *purbp;
	struct uhci_td *pltd;
	unsigned int toggle;
	unsigned long flags;

	urbp = urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	if (list_empty(&urbp->queue_list))
		goto out;

	nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);

	/* Fix up the toggle for the next URB's */
	if (!urbp->queued)
		/* We just set the toggle in uhci_unlink_generic */
		toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
	else {
		/* If we're in the middle of the queue, grab the toggle */
		/* from the TD previous to us */
		purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
				queue_list);

		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);

		toggle = uhci_toggle(td_token(pltd)) ^ 1;
	}

	head = &urbp->queue_list;
	tmp = head->next;
	while (head != tmp) {
		struct urb_priv *turbp;

		turbp = list_entry(tmp, struct urb_priv, queue_list);

		tmp = tmp->next;

		if (!turbp->queued)
			break;

		toggle = uhci_fixup_toggle(turbp->urb, toggle);
	}

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
		usb_pipeout(urb->pipe), toggle);

	if (!urbp->queued) {
		struct uhci_qh *pqh;

		nurbp->queued = 0;

		/*
		 * Fixup the previous QH's queue to link to the new head
		 * of this queue.
		 */
		pqh = list_entry(urbp->qh->list.prev, struct uhci_qh, list);

		if (pqh->urbp) {
			struct list_head *head, *tmp;

			head = &pqh->urbp->queue_list;
			tmp = head->next;
			while (head != tmp) {
				struct urb_priv *turbp =
					list_entry(tmp, struct urb_priv, queue_list);

				tmp = tmp->next;

				turbp->qh->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
			}
		}

		pqh->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;

		list_add_tail(&nurbp->qh->list, &urbp->qh->list);
		list_del_init(&urbp->qh->list);
	} else {
		/* We're somewhere in the middle (or end). A bit trickier */
		/* than the head scenario */
		purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
				queue_list);

		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
		if (nurbp->queued)
			pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		else
			/* The next URB happens to be the beginning, so */
			/* we're the last, end the chain */
			pltd->link = UHCI_PTR_TERM;
	}

	list_del_init(&urbp->queue_list);

out:
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
extern void* malloc(int size);

static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = malloc(sizeof(struct urb_priv));	//**kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
	if (!urbp) {
		err("uhci_alloc_urb_priv: couldn't allocate memory for urb_priv\n");
		return NULL;
	}

	memset((void *)urbp, 0, sizeof(*urbp));

	urbp->inserttime = jiffies26;
	urbp->fsbrtime = jiffies26;
	urbp->urb = urb;
	urbp->dev = urb->dev;

	INIT_LIST_HEAD(&urbp->td_list);
	INIT_LIST_HEAD(&urbp->queue_list);
	INIT_LIST_HEAD(&urbp->complete_list);
	INIT_LIST_HEAD(&urbp->urb_list);

	list_add_tail(&urbp->urb_list, &uhci->urb_list);

	urb->hcpriv = urbp;

	return urbp;
}
/*
 * MUST be called with urb->lock acquired
 */
static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	td->urb = urb;

	list_add_tail(&td->list, &urbp->td_list);
}

/*
 * MUST be called with urb->lock acquired
 */
static void uhci_remove_td_from_urb(struct uhci_td *td)
{
	if (list_empty(&td->list))
		return;

	list_del_init(&td->list);

	td->urb = NULL;
}
/*
 * MUST be called with urb->lock acquired
 */
static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *head, *tmp;
	struct urb_priv *urbp;
	unsigned long flags;

	urbp = (struct urb_priv *)urb->hcpriv;
	if (!urbp)
		return;

	if (!list_empty(&urbp->urb_list))
		warn("uhci_destroy_urb_priv: urb %p still on uhci->urb_list or uhci->remove_list", urb);

	if (!list_empty(&urbp->complete_list))
		warn("uhci_destroy_urb_priv: urb %p still on uhci->complete_list", urb);

	spin_lock_irqsave(&uhci->td_remove_list_lock, flags);

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the TD's */
	if (list_empty(&uhci->td_remove_list))
		uhci_set_next_interrupt(uhci);

	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		uhci_remove_td_from_urb(td);
		uhci_remove_td(uhci, td);
		list_add(&td->remove_list, &uhci->td_remove_list);
	}

	spin_unlock_irqrestore(&uhci->td_remove_list_lock, flags);

	urb->hcpriv = NULL;
	//**kmem_cache_free(uhci_up_cachep, urbp);
	free(urbp);
}
static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	unsigned long flags;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
		urbp->fsbr = 1;
		if (!uhci->fsbr++ && !uhci->fsbrtimeout)
			uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_hs_control_qh->dma_handle) | UHCI_PTR_QH;
	}

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}

static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	unsigned long flags;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
		urbp->fsbr = 0;
		if (!--uhci->fsbr)
			uhci->fsbrtimeout = jiffies26 + FSBR_DELAY;
	}

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
 * Map status to standard result codes
 *
 * <status> is (td->status & 0xFE0000) [a.k.a. uhci_status_bits(td->status)]
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)		/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {	/* CRC/Timeout */
		if (dir_out)
			return -ETIMEDOUT;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_NAK)		/* NAK */
		return -ETIMEDOUT;
	if (status & TD_CTRL_BABBLE)		/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)		/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)		/* Stalled */
		return -EPIPE;
	if (status & TD_CTRL_ACTIVE)		/* Active */
		return 0;

	return -EINVAL;
}
/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;
	struct uhci_qh *qh, *skelqh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors */
	status = TD_CTRL_ACTIVE | uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status, destination | uhci_explen(7),
		urb->setup_dma);

	/*
	 * If direction is "send", change the frame from SETUP (0x2D)
	 * to OUT (0xE1). Else change it from SETUP to IN (0x69).
	 */
	destination ^= (USB_PID_SETUP ^ usb_packetid(urb->pipe));

	if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TD's
	 */
	while (len > 0) {
		int pktsze = len;

		if (pktsze > maxsze)
			pktsze = maxsze;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
			data);

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	status &= ~TD_CTRL_SPD;

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status | TD_CTRL_IOC,
		destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	/* Low speed transfers get a different queue, and won't hog the bus */
	if (urb->dev->speed == USB_SPEED_LOW)
		skelqh = uhci->skel_ls_control_qh;
	else {
		skelqh = uhci->skel_hs_control_qh;
		uhci_inc_fsbr(uhci, urb);
	}

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
/*
 * If control was short, then the status packet wasn't sent, so this
 * reorganizes it so it's sent to finish the transfer. The original QH is
 * removed from the skel and discarded; all TDs except the last (status)
 * are deleted; the last (status) TD is put on a new QH which is reinserted
 * into the skel. Since the last TD and urb_priv are reused, the TD->link
 * and urb_priv maintain any queued QHs.
 */
static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	urbp->short_control_packet = 1;

	/* Create a new QH to avoid pointer overwriting problems */
	uhci_remove_qh(uhci, urbp->qh);

	/* Delete all of the TD's except for the status TD at the end */
	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head && tmp->next != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		uhci_remove_td_from_urb(td);
		uhci_remove_td(uhci, td);
		uhci_free_td(uhci, td);
	}

	urbp->qh = uhci_alloc_qh(uhci, urb->dev);
	if (!urbp->qh) {
		err("unable to allocate new QH for control retrigger");
		return -ENOMEM;
	}

	urbp->qh->urbp = urbp;

	/* One TD, who cares about Breadth first? */
	uhci_insert_tds_in_qh(urbp->qh, urb, UHCI_PTR_DEPTH);

	/* Low speed transfers get a different queue */
	if (urb->dev->speed == USB_SPEED_LOW)
		uhci_insert_qh(uhci, uhci->skel_ls_control_qh, urb);
	else
		uhci_insert_qh(uhci, uhci->skel_hs_control_qh, urb);

	return -EINPROGRESS;
}
static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status;
	int ret = 0;

	if (list_empty(&urbp->td_list))
		return -EINVAL;

	head = &urbp->td_list;
	if (urbp->short_control_packet) {
		tmp = head->prev;
		goto status_phase;
	}
	tmp = head->next;
	td = list_entry(tmp, struct uhci_td, list);

	/* The first TD is the SETUP phase, check the status, but skip */
	/* the count */
	status = uhci_status_bits(td_status(td));
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	urb->actual_length = 0;

	/* The rest of the TD's (but the last) are data */
	tmp = tmp->next;
	while (tmp != head && tmp->next != head) {
		td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		status = uhci_status_bits(td_status(td));
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(td_status(td));

		if (status)
			goto td_error;

		/* Check to see if we received a short packet */
		if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			}

			if (uhci_packetid(td_token(td)) == USB_PID_IN)
				return usb_control_retrigger_status(uhci, urb);
			else
				return 0;
		}
	}

status_phase:
	td = list_entry(tmp, struct uhci_td, list);

	/* Control status phase */
	status = td_status(td);

#ifdef I_HAVE_BUGGY_APC_BACKUPS
	/* APC BackUPS Pro kludge */
	/* It tries to send all of the descriptor instead of the amount */
	/* we requested */
	if (status & TD_CTRL_IOC &&	/* IOC is masked out by uhci_status_bits */
	    status & TD_CTRL_ACTIVE &&
	    status & TD_CTRL_NAK)
		return 0;
#endif

	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (uhci_status_bits(status))
		goto td_error;

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dbg("uhci_result_control() failed with status %x", status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}

	return ret;
}
/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
{
	struct uhci_td *td;
	struct uhci_qh *qh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	dma_addr_t data = urb->transfer_dma;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TD's
	 */
	do {	/* Allow zero length packets */
		int pktsze = len;

		if (pktsze > maxsze)
			pktsze = maxsze;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		data += pktsze;
		len -= maxsze;

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
	    !len && urb->transfer_buffer_length) {
		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	}

	/* Set the flag on the last packet */
	td->status |= cpu_to_le32(TD_CTRL_IOC);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	/* Always breadth first */
	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
/*
 * Common result for bulk and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status = 0;
	int ret = 0;

	urb->actual_length = 0;

	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		status = uhci_status_bits(td_status(td));
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(td_status(td));

		if (status)
			goto td_error;

		if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			} else
				return 0;
		}
	}

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));
	if (ret == -EPIPE)
		/* endpoint has stalled - mark it halted */
		usb_endpoint_halt(urb->dev, uhci_endpoint(td_token(td)),
			uhci_packetout(td_token(td)));

err:
	/*
	 * Enable this chunk of code if you want to see some more debugging.
	 * But be careful, it has the tendency to starve out khubd and prevent
	 * disconnects from happening successfully if you have a slow debug
	 * log interface (like a serial console).
	 */
#if 0
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dbg("uhci_result_common() failed with status %x", status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}
#endif
	return ret;
}
static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	int ret;

	/* Can't have low speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
	if (ret == -EINPROGRESS)
		uhci_inc_fsbr(uhci, urb);

	return ret;
}

static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	/* USB 1.1 interrupt transfers only involve one packet per interval;
	 * that's the uhci_submit_common() "breadth first" policy. Drivers
	 * can submit urbs of any length, but longer ones might need many
	 * intervals to complete.
	 */
	return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
}
/*
 * Bulk and interrupt use common result
 */
#define uhci_result_bulk	uhci_result_common
#define uhci_result_interrupt	uhci_result_common
/*
 * Isochronous transfers
 */
static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
{
	struct urb *last_urb = NULL;
	struct list_head *tmp, *head;
	int ret = 0;

	head = &uhci->urb_list;
	tmp = head->next;
	while (tmp != head) {
		struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
		struct urb *u = up->urb;

		tmp = tmp->next;

		/* look for pending URB's with identical pipe handle */
		if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
		    (u->status == -EINPROGRESS) && (u != urb)) {
			if (!last_urb)
				*start = u->start_frame;
			last_urb = u;
		}
	}

	if (last_urb) {
		*end = (last_urb->start_frame + last_urb->number_of_packets *
			last_urb->interval) & (UHCI_NUMFRAMES-1);
		ret = 0;
	} else
		ret = -1;	/* no previous urb found */

	return ret;
}
static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
{
	int limits;
	unsigned int start = 0, end = 0;

	if (urb->number_of_packets > 900)	/* 900? Why? */
		return -EFBIG;

	limits = isochronous_find_limits(uhci, urb, &start, &end);

	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (limits) {
			int curframe;

			curframe = uhci_get_current_frame_number(uhci) % UHCI_NUMFRAMES;
			urb->start_frame = (curframe + 10) % UHCI_NUMFRAMES;
		} else
			urb->start_frame = end;
	} else {
		urb->start_frame %= UHCI_NUMFRAMES;
		/* FIXME: Sanity check */
	}

	return 0;
}
/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	int i, ret, frame;
	int status, destination;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	ret = isochronous_find_start(uhci, urb);
	if (ret)
		return ret;

	frame = urb->start_frame;
	for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
		if (!urb->iso_frame_desc[i].length)
			continue;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
			urb->transfer_dma + urb->iso_frame_desc[i].offset);

		if (i + 1 >= urb->number_of_packets)
			td->status |= cpu_to_le32(TD_CTRL_IOC);

		uhci_insert_td_frame_list(uhci, td, frame);
	}

	return -EINPROGRESS;
}
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int status;
	int i, ret = 0;

	urb->actual_length = 0;

	i = 0;
	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
		int actlength;

		tmp = tmp->next;

		if (td_status(td) & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		actlength = uhci_actual_length(td_status(td));
		urb->iso_frame_desc[i].actual_length = actlength;
		urb->actual_length += actlength;

		status = uhci_map_status(uhci_status_bits(td_status(td)), usb_pipeout(urb->pipe));
		urb->iso_frame_desc[i].status = status;
		if (status) {
			urb->error_count++;
			ret = status;
		}

		i++;
	}

	return ret;
}
/*
 * MUST be called with uhci->urb_list_lock acquired
 */
static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;

	/* We don't match Isoc transfers since they are special */
	if (usb_pipeisoc(urb->pipe))
		return NULL;

	head = &uhci->urb_list;
	tmp = head->next;
	while (tmp != head) {
		struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
		struct urb *u = up->urb;

		tmp = tmp->next;

		if (u->dev == urb->dev && u->status == -EINPROGRESS) {
			/* For control, ignore the direction */
			if (usb_pipecontrol(urb->pipe) &&
			    (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
				return u;
			else if (u->pipe == urb->pipe)
				return u;
		}
	}

	return NULL;
}
static int uhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, int mem_flags)
{
	int ret = -EINVAL;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb *eurb;
	int bustime;

	spin_lock_irqsave(&uhci->urb_list_lock, flags);

	eurb = uhci_find_urb_ep(uhci, urb);

	if (!uhci_alloc_urb_priv(uhci, urb)) {
		spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
		return -ENOMEM;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_submit_control(uhci, urb, eurb);
		break;
	case PIPE_INTERRUPT:
		if (!eurb) {
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, eurb);
				if (ret == -EINPROGRESS)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			urb->bandwidth = eurb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, eurb);
		}
		break;
	case PIPE_BULK:
		ret = uhci_submit_bulk(uhci, urb, eurb);
		break;
	case PIPE_ISOCHRONOUS:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb);
		if (ret == -EINPROGRESS)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}

	if (ret != -EINPROGRESS) {
		/* Submit failed, so delete it from the urb_list */
		struct urb_priv *urbp = urb->hcpriv;

		list_del_init(&urbp->urb_list);
		spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
		uhci_destroy_urb_priv(uhci, urb);

		return ret;
	}

	spin_unlock_irqrestore(&uhci->urb_list_lock, flags);

	return 0;
}
/*
 * Return the result of a transfer
 *
 * MUST be called with urb_list_lock acquired
 */
static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
{
	int ret = -EINVAL;
	unsigned long flags;
	struct urb_priv *urbp;

	spin_lock_irqsave(&urb->lock, flags);

	urbp = (struct urb_priv *)urb->hcpriv;

	if (urb->status != -EINPROGRESS) {
		info("uhci_transfer_result: called for URB %p not in flight?", urb);
		goto out;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_result_control(uhci, urb);
		break;
	case PIPE_INTERRUPT:
		ret = uhci_result_interrupt(uhci, urb);
		break;
	case PIPE_BULK:
		ret = uhci_result_bulk(uhci, urb);
		break;
	case PIPE_ISOCHRONOUS:
		ret = uhci_result_isochronous(uhci, urb);
		break;
	}

	urbp->status = ret;

	if (ret == -EINPROGRESS)
		goto out;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	case PIPE_ISOCHRONOUS:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Spinlock needed ? */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		uhci_unlink_generic(uhci, urb);
		break;
	case PIPE_INTERRUPT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		spin_lock(&uhci->frame_list_lock);
		/* Spinlock needed ? */
		if (list_empty(&urbp->queue_list) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		spin_unlock(&uhci->frame_list_lock);
		uhci_unlink_generic(uhci, urb);
		break;
	default:
		info("uhci_transfer_result: unknown pipe type %d for urb %p\n",
			usb_pipetype(urb->pipe), urb);
	}

	/* Remove it from uhci->urb_list */
	list_del_init(&urbp->urb_list);

	uhci_add_complete(uhci, urb);

out:
	spin_unlock_irqrestore(&urb->lock, flags);
}
1596 | /* |
||
1597 | * MUST be called with urb->lock acquired |
||
1598 | */ |
||
1599 | static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb) |
||
1600 | { |
||
1601 | struct list_head *head, *tmp; |
||
1602 | struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; |
||
1603 | int prevactive = 1; |
||
1604 | |||
1605 | /* We can get called when urbp allocation fails, so check */ |
||
1606 | if (!urbp) |
||
1607 | return; |
||
1608 | |||
1609 | uhci_dec_fsbr(uhci, urb); /* Safe since it checks */ |
||
1610 | |||
1611 | /* |
||
1612 | * Now we need to find out what the last successful toggle was |
||
1613 | * so we can update the local data toggle for the next transfer |
||
1614 | * |
||
1615 | * There's 3 way's the last successful completed TD is found: |
||
1616 | * |
||
1617 | * 1) The TD is NOT active and the actual length < expected length |
||
1618 | * 2) The TD is NOT active and it's the last TD in the chain |
||
1619 | * 3) The TD is active and the previous TD is NOT active |
||
1620 | * |
||
1621 | * Control and Isochronous ignore the toggle, so this is safe |
||
1622 | * for all types |
||
1623 | */ |
||
1624 | head = &urbp->td_list; |
||
1625 | tmp = head->next; |
||
1626 | while (tmp != head) { |
||
1627 | struct uhci_td *td = list_entry(tmp, struct uhci_td, list); |
||
1628 | |||
1629 | tmp = tmp->next; |
||
1630 | |||
1631 | if (!(td_status(td) & TD_CTRL_ACTIVE) && |
||
1632 | (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td)) || |
||
1633 | tmp == head)) |
||
1634 | usb_settoggle(urb->dev, uhci_endpoint(td_token(td)), |
||
1635 | uhci_packetout(td_token(td)), |
||
1636 | uhci_toggle(td_token(td)) ^ 1); |
||
1637 | else if ((td_status(td) & TD_CTRL_ACTIVE) && !prevactive) |
||
1638 | usb_settoggle(urb->dev, uhci_endpoint(td_token(td)), |
||
1639 | uhci_packetout(td_token(td)), |
||
1640 | uhci_toggle(td_token(td))); |
||
1641 | |||
1642 | prevactive = td_status(td) & TD_CTRL_ACTIVE; |
||
1643 | } |
||
1644 | |||
1645 | uhci_delete_queued_urb(uhci, urb); |
||
1646 | |||
1647 | /* The interrupt loop will reclaim the QH's */ |
||
1648 | uhci_remove_qh(uhci, urbp->qh); |
||
1649 | urbp->qh = NULL; |
||
1650 | } |
||
1651 | |||
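/*
 * Stop an URB: unlink it from the schedule, then park it on
 * urb_remove_list.  The entry stays there until the next IOC
 * interrupt, which guarantees the controller has moved past the
 * unlinked QH before the URB is handed back with -ECONNRESET.
 */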
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
    struct uhci_hcd *uhci = hcd_to_uhci(hcd);
    unsigned long flags;
    struct urb_priv *urbp = urb->hcpriv;

    /* If this is an interrupt URB that is being killed in urb->complete, */
    /* then just set its status and return */
    if (!urbp) {
        urb->status = -ECONNRESET;
        return 0;
    }

    spin_lock_irqsave(&uhci->urb_list_lock, flags);

    list_del_init(&urbp->urb_list);

    uhci_unlink_generic(uhci, urb);

    spin_lock(&uhci->urb_remove_list_lock);

    /* If we're the first, set the next interrupt bit */
    if (list_empty(&uhci->urb_remove_list))
        uhci_set_next_interrupt(uhci);
    list_add(&urbp->urb_list, &uhci->urb_remove_list);

    spin_unlock(&uhci->urb_remove_list_lock);
    spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
    return 0;
}

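/*
 * Called when an URB has been using full-speed bandwidth reclamation
 * for longer than IDLE_TIMEOUT without completing.  FSBR is dropped
 * for the URB and one in every DEPTH_INTERVAL of its TD links is
 * adjusted so the transfer stops monopolizing the schedule.
 */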
static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
{
    struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
    struct list_head *head, *tmp;
    int count = 0;

    uhci_dec_fsbr(uhci, urb);

    urbp->fsbr_timeout = 1;

    /*
     * Ideally we would want to fix qh->element as well, but it's
     * read/write by the HC, so that can introduce a race. It's not
     * really worth the hassle
     */

    head = &urbp->td_list;
    tmp = head->next;
    while (tmp != head) {
        struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

        tmp = tmp->next;

        /*
         * Make sure we don't do the last one (since it'll have the
         * TERM bit set); also skip every so many TDs to make sure
         * it doesn't hog the bandwidth
         */
        if (tmp != head && (count % DEPTH_INTERVAL) == (DEPTH_INTERVAL - 1))
            td->link |= UHCI_PTR_DEPTH;

        count++;
    }

    return 0;
}

/*
 * uhci_get_current_frame_number()
 *
 * returns the current frame number for a USB bus/controller.
 */
static int uhci_get_current_frame_number(struct uhci_hcd *uhci)
{
    return inw(uhci->io_addr + USBFRNUM);
}

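/*
 * The "stall" timer re-arms itself every HZ/10 jiffies.  Each tick it
 * scans the active URB list for FSBR and URB timeouts, dequeues any
 * URB whose timeout expired, tears down the FSBR loop once its grace
 * period is over, and drives the suspend/resume state machine.
 */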
static int init_stall_timer(struct usb_hcd *hcd);

static void stall_callback(unsigned long ptr)
{
    struct usb_hcd *hcd = (struct usb_hcd *)ptr;
    struct uhci_hcd *uhci = hcd_to_uhci(hcd);
    struct list_head list, *tmp, *head;
    unsigned long flags;

    INIT_LIST_HEAD(&list);

    spin_lock_irqsave(&uhci->urb_list_lock, flags);
    head = &uhci->urb_list;
    tmp = head->next;
    while (tmp != head) {
        struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
        struct urb *u = up->urb;

        tmp = tmp->next;

        spin_lock(&u->lock);

        /* Check if the FSBR timed out */
        if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies26, up->fsbrtime + IDLE_TIMEOUT))
            uhci_fsbr_timeout(uhci, u);

        /* Check if the URB timed out */
        if (u->timeout && time_after_eq(jiffies26, up->inserttime + u->timeout))
            list_move_tail(&up->urb_list, &list);

        spin_unlock(&u->lock);
    }
    spin_unlock_irqrestore(&uhci->urb_list_lock, flags);

    head = &list;
    tmp = head->next;
    while (tmp != head) {
        struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
        struct urb *u = up->urb;

        tmp = tmp->next;

        uhci_urb_dequeue(hcd, u);
    }

    /* Really disable FSBR */
    if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies26, uhci->fsbrtimeout)) {
        uhci->fsbrtimeout = 0;
        uhci->skel_term_qh->link = UHCI_PTR_TERM;
    }

    /* Poll for and perform state transitions */
    hc_state_transitions(uhci);

    init_stall_timer(hcd);
}

static int init_stall_timer(struct usb_hcd *hcd)
{
    struct uhci_hcd *uhci = hcd_to_uhci(hcd);

    init_timer(&uhci->stall_timer);
    uhci->stall_timer.function = stall_callback;
    uhci->stall_timer.data = (unsigned long)hcd;
    uhci->stall_timer.expires = jiffies26 + (HZ / 10);
    add_timer(&uhci->stall_timer);

    return 0;
}

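/*
 * QHs and TDs unlinked from the schedule are not freed immediately;
 * they sit on qh_remove_list/td_remove_list until the next interrupt,
 * by which point the controller can no longer be referencing them.
 * These two helpers do the deferred freeing.
 */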
static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
{
    struct list_head *tmp, *head;
    unsigned long flags;

    spin_lock_irqsave(&uhci->qh_remove_list_lock, flags);
    head = &uhci->qh_remove_list;
    tmp = head->next;
    while (tmp != head) {
        struct uhci_qh *qh = list_entry(tmp, struct uhci_qh, remove_list);

        tmp = tmp->next;

        list_del_init(&qh->remove_list);

        uhci_free_qh(uhci, qh);
    }
    spin_unlock_irqrestore(&uhci->qh_remove_list_lock, flags);
}

static void uhci_free_pending_tds(struct uhci_hcd *uhci)
{
    struct list_head *tmp, *head;
    unsigned long flags;

    spin_lock_irqsave(&uhci->td_remove_list_lock, flags);
    head = &uhci->td_remove_list;
    tmp = head->next;
    while (tmp != head) {
        struct uhci_td *td = list_entry(tmp, struct uhci_td, remove_list);

        tmp = tmp->next;

        list_del_init(&td->remove_list);

        uhci_free_td(uhci, td);
    }
    spin_unlock_irqrestore(&uhci->td_remove_list_lock, flags);
}

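/*
 * Hand a completed URB back to the USB core.  The hardware status
 * collected in urbp->status wins unless the URB was already marked
 * unlinked (-ENOENT) or killed (-ECONNRESET) by the caller.
 */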
static void uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
{
    struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
    struct uhci_hcd *uhci = hcd_to_uhci(hcd);
    int status;
    unsigned long flags;

    spin_lock_irqsave(&urb->lock, flags);
    status = urbp->status;
    uhci_destroy_urb_priv(uhci, urb);

    if (urb->status != -ENOENT && urb->status != -ECONNRESET)
        urb->status = status;
    spin_unlock_irqrestore(&urb->lock, flags);

    usb_hcd_giveback_urb(hcd, urb, regs);
}

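/*
 * Drain the completion list.  complete_list_lock is dropped around
 * each giveback because the completion handler may resubmit the URB
 * and re-enter the driver; the scan restarts from the list head after
 * every callback.
 */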
static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs)
{
    struct uhci_hcd *uhci = hcd_to_uhci(hcd);
    struct list_head *tmp, *head;
    unsigned long flags;

    spin_lock_irqsave(&uhci->complete_list_lock, flags);
    head = &uhci->complete_list;
    tmp = head->next;
    while (tmp != head) {
        struct urb_priv *urbp = list_entry(tmp, struct urb_priv, complete_list);
        struct urb *urb = urbp->urb;

        list_del_init(&urbp->complete_list);
        spin_unlock_irqrestore(&uhci->complete_list_lock, flags);

        uhci_finish_urb(hcd, urb, regs);

        spin_lock_irqsave(&uhci->complete_list_lock, flags);
        head = &uhci->complete_list;
        tmp = head->next;
    }
    spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
}

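/*
 * Move everything parked on urb_remove_list by uhci_urb_dequeue() to
 * the completion list with -ECONNRESET; runs from the interrupt
 * handler once the controller is past the unlinked QHs.
 */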
static void uhci_remove_pending_qhs(struct uhci_hcd *uhci)
{
    struct list_head *tmp, *head;
    unsigned long flags;

    spin_lock_irqsave(&uhci->urb_remove_list_lock, flags);
    head = &uhci->urb_remove_list;
    tmp = head->next;
    while (tmp != head) {
        struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
        struct urb *urb = urbp->urb;

        tmp = tmp->next;

        list_del_init(&urbp->urb_list);

        urbp->status = urb->status = -ECONNRESET;

        uhci_add_complete(uhci, urb);
    }
    spin_unlock_irqrestore(&uhci->urb_remove_list_lock, flags);
}

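/*
 * Main interrupt handler.  Acknowledges the status register, reports
 * fatal conditions, performs the deferred QH/TD/URB cleanup, then
 * walks every in-flight URB to collect results and finally gives the
 * completed ones back.
 */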
static void uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
{
    struct uhci_hcd *uhci = hcd_to_uhci(hcd);
    unsigned int io_addr = uhci->io_addr;
    unsigned short status;
    struct list_head *tmp, *head;

    static int count = 0;

    /*
     * Read the interrupt status, and write it back to clear the
     * interrupt cause
     */
    status = inw(io_addr + USBSTS);
    if (!status)    /* shared interrupt, not mine */
        return;
    outw(status, io_addr + USBSTS);     /* Clear it */

    // printk("%x uhci_irq\n", io_addr);

    if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
        if (status & USBSTS_HSE)
            err("%x: host system error, PCI problems?", io_addr);
        if (status & USBSTS_HCPE)
            err("%x: host controller process error, something bad happened", io_addr);
        if ((status & USBSTS_HCH) && uhci->state > 0) {
            err("%x: host controller halted, very bad", io_addr);
            /* FIXME: Reset the controller, fix the offending TD */
        }
    }

    if (status & USBSTS_RD)
        uhci->resume_detect = 1;

    uhci_free_pending_qhs(uhci);

    uhci_free_pending_tds(uhci);

    uhci_remove_pending_qhs(uhci);

    uhci_clear_next_interrupt(uhci);

    /* Walk the list of pending URBs to see which ones completed */
    spin_lock(&uhci->urb_list_lock);
    head = &uhci->urb_list;
    tmp = head->next;
    while (tmp != head) {
        struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
        struct urb *urb = urbp->urb;

        tmp = tmp->next;

        /* Checks the status and does all of the magic necessary */
        uhci_transfer_result(uhci, urb);
    }
    spin_unlock(&uhci->urb_list_lock);

    uhci_finish_completion(hcd, regs);
}

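/*
 * Global reset of the controller: assert GRESET for 50 ms, then allow
 * another 10 ms of settling time.  The delays use the scheduler, so
 * this must not be called from interrupt context.
 */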
static void reset_hc(struct uhci_hcd *uhci)
{
    unsigned int io_addr = uhci->io_addr;

    /* Global reset for 50ms */
    uhci->state = UHCI_RESET;
    outw(USBCMD_GRESET, io_addr + USBCMD);
    set_current_state(TASK_UNINTERRUPTIBLE);
    schedule_timeout((HZ*50+999) / 1000);
    outw(0, io_addr + USBCMD);

    /* Another 10ms delay */
    set_current_state(TASK_UNINTERRUPTIBLE);
    schedule_timeout((HZ*10+999) / 1000);
    uhci->resume_detect = 0;
}

static void suspend_hc(struct uhci_hcd *uhci)
{
    unsigned int io_addr = uhci->io_addr;

    dbg("%x: suspend_hc", io_addr);
    uhci->state = UHCI_SUSPENDED;
    uhci->resume_detect = 0;
    outw(USBCMD_EGSM, io_addr + USBCMD);
}

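/*
 * Resume sequencing: drive global resume (FGR) for at least 20 ms,
 * drop it, wait for the hardware to finish sending the EOP, then run
 * the controller for a 1 second grace period before another suspend
 * is allowed.  Each call advances the state machine one step.
 */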
static void wakeup_hc(struct uhci_hcd *uhci)
{
    unsigned int io_addr = uhci->io_addr;

    switch (uhci->state) {
    case UHCI_SUSPENDED:        /* Start the resume */
        dbg("%x: wakeup_hc", io_addr);

        /* Global resume for >= 20ms */
        outw(USBCMD_FGR | USBCMD_EGSM, io_addr + USBCMD);
        uhci->state = UHCI_RESUMING_1;
        uhci->state_end = jiffies26 + (20*HZ+999) / 1000;
        break;

    case UHCI_RESUMING_1:       /* End global resume */
        uhci->state = UHCI_RESUMING_2;
        outw(0, io_addr + USBCMD);
        /* Falls through */

    case UHCI_RESUMING_2:       /* Wait for EOP to be sent */
        if (inw(io_addr + USBCMD) & USBCMD_FGR)
            break;

        /* Run for at least 1 second, and
         * mark it configured with a 64-byte max packet */
        uhci->state = UHCI_RUNNING_GRACE;
        uhci->state_end = jiffies26 + HZ;
        outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP,
            io_addr + USBCMD);
        break;

    case UHCI_RUNNING_GRACE:    /* Now allowed to suspend */
        uhci->state = UHCI_RUNNING;
        break;

    default:
        break;
    }
}

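/* Returns nonzero if any root hub port reports a connected device (CCS set) */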
static int ports_active(struct uhci_hcd *uhci)
{
    unsigned int io_addr = uhci->io_addr;
    int connection = 0;
    int i;

    for (i = 0; i < uhci->rh_numports; i++)
        connection |= (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_CCS);

    return connection;
}

static int suspend_allowed(struct uhci_hcd *uhci)
{
    unsigned int io_addr = uhci->io_addr;
    int i;

    if (!uhci->hcd.pdev || uhci->hcd.pdev->vendor != PCI_VENDOR_ID_INTEL)
        return 1;

    /* Some of Intel's USB controllers have a bug that causes false
     * resume indications if any port has an over current condition.
     * To prevent problems, we will not allow a global suspend if
     * any ports are OC.
     *
     * Some motherboards using Intel's chipsets (but not using all
     * the USB ports) appear to hardwire the over current inputs active
     * to disable the USB ports.
     */

    /* check for over current condition on any port */
    for (i = 0; i < uhci->rh_numports; i++) {
        if (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_OC)
            return 0;
    }

    return 1;
}

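/*
 * Periodic state machine, driven from stall_callback():
 *
 *   RUNNING          -> SUSPENDING_GRACE  when no ports are active
 *   SUSPENDING_GRACE -> SUSPENDED         after 1 second of idleness
 *   SUSPENDED        -> resume sequence   when a device signals resume
 *
 * RESUMING_1, RESUMING_2 and RUNNING_GRACE are timed steps handled
 * by wakeup_hc().
 */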
static void hc_state_transitions(struct uhci_hcd *uhci)
{
    switch (uhci->state) {
    case UHCI_RUNNING:

        /* global suspend if nothing connected for 1 second */
        if (!ports_active(uhci) && suspend_allowed(uhci)) {
            uhci->state = UHCI_SUSPENDING_GRACE;
            uhci->state_end = jiffies26 + HZ;
        }
        break;

    case UHCI_SUSPENDING_GRACE:
        if (ports_active(uhci))
            uhci->state = UHCI_RUNNING;
        else if (time_after_eq(jiffies26, uhci->state_end))
            suspend_hc(uhci);
        break;

    case UHCI_SUSPENDED:

        /* wakeup if requested by a device */
        if (uhci->resume_detect)
            wakeup_hc(uhci);
        break;

    case UHCI_RESUMING_1:
    case UHCI_RESUMING_2:
    case UHCI_RUNNING_GRACE:
        if (time_after_eq(jiffies26, uhci->state_end))
            wakeup_hc(uhci);
        break;

    default:
        break;
    }
}

static void start_hc(struct uhci_hcd *uhci)
{
    unsigned int io_addr = uhci->io_addr;
    int timeout = 1000;

    /*
     * Reset the HC - this will force us to get a
     * new notification of any already connected
     * ports due to the virtual disconnect that it
     * implies.
     */
    outw(USBCMD_HCRESET, io_addr + USBCMD);
    while (inw(io_addr + USBCMD) & USBCMD_HCRESET) {
        if (!--timeout) {
            printk(KERN_ERR "uhci: USBCMD_HCRESET timed out!\n");
            break;
        }
    }

    /* Turn on all interrupts */
    outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
        io_addr + USBINTR);

    /* Start at frame 0 */
    outw(0, io_addr + USBFRNUM);
    outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);

    /* Run and mark it configured with a 64-byte max packet */
    uhci->state = UHCI_RUNNING_GRACE;
    uhci->state_end = jiffies26 + HZ;
    outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);

    uhci->hcd.state = USB_STATE_RUNNING;

#ifdef DEB
    {
        __u32 *tdp;
        int i;
        int status = inw(io_addr + USBSTS);

        printk(KERN_INFO "[%x] Frame = %d Status =%x fl=%x\n", io_addr,
            inw(io_addr + USBFRNUM), status, uhci->fl->dma_handle);
        for (i = 0; i < 20; i++) {
            int status = inw(io_addr + USBSTS);

            wait_ms26(500);
            tdp = (__u32 *)uhci->fl->frame[i];
            printk(KERN_INFO "[%x] Frame[%d] -> @%x = %x status=%x fl=%x\n",
                io_addr, i, uhci->fl->frame[i], *tdp, status, uhci->fl->dma_handle);
        }
    }
#endif
}

/*
 * De-allocate all resources.
 */
static void release_uhci(struct uhci_hcd *uhci)
{
    int i;

    for (i = 0; i < UHCI_NUM_SKELQH; i++)
        if (uhci->skelqh[i]) {
            uhci_free_qh(uhci, uhci->skelqh[i]);
            uhci->skelqh[i] = NULL;
        }

    if (uhci->term_td) {
        uhci_free_td(uhci, uhci->term_td);
        uhci->term_td = NULL;
    }

    if (uhci->qh_pool) {
        pci_pool_destroy(uhci->qh_pool);
        uhci->qh_pool = NULL;
    }

    if (uhci->td_pool) {
        pci_pool_destroy(uhci->td_pool);
        uhci->td_pool = NULL;
    }

    if (uhci->fl) {
        pci_free_consistent(uhci->hcd.pdev, sizeof(*uhci->fl), uhci->fl, uhci->fl->dma_handle);
        uhci->fl = NULL;
    }

#ifdef CONFIG_PROC_FS
    if (uhci->proc_entry) {
        remove_proc_entry(uhci->hcd.self.bus_name, uhci_proc_root);
        uhci->proc_entry = NULL;
    }
#endif
}

static int uhci_reset(struct usb_hcd *hcd)
{
    struct uhci_hcd *uhci = hcd_to_uhci(hcd);

    uhci->io_addr = (unsigned long) hcd->regs;

    /* Maybe kick BIOS off this hardware. Then reset, so we won't get
     * interrupts from any previous setup.
     */
    reset_hc(uhci);
    pci_write_config_word(hcd->pdev, USBLEGSUP, USBLEGSUP_DEFAULT);
    return 0;
}

/*
 * Allocate a frame list, and then setup the skeleton
 *
 * The hardware doesn't really know any difference
 * in the queues, but the order does matter for the
 * protocols higher up. The order is:
 *
 *  - any isochronous events handled before any
 *    of the queues. We don't do that here, because
 *    we'll create the actual TD entries on demand.
 *  - The first queue is the interrupt queue.
 *  - The second queue is the control queue, split into low and high speed
 *  - The third queue is the bulk queue.
 *  - The fourth queue is the bandwidth reclamation queue, which loops back
 *    to the high speed control queue.
 */
static int uhci_start(struct usb_hcd *hcd)
{
    struct uhci_hcd *uhci = hcd_to_uhci(hcd);
    int retval = -EBUSY;
    int i, port;
    unsigned io_size;
    dma_addr_t dma_handle;
    struct usb_device *udev;
#ifdef CONFIG_PROC_FS
    struct proc_dir_entry *ent;
#endif

    io_size = pci_resource_len(hcd->pdev, hcd->region);

#ifdef CONFIG_PROC_FS
    ent = create_proc_entry(hcd->self.bus_name, S_IFREG|S_IRUGO|S_IWUSR, uhci_proc_root);
    if (!ent) {
        err("couldn't create uhci proc entry");
        retval = -ENOMEM;
        goto err_create_proc_entry;
    }

    ent->data = uhci;
    ent->proc_fops = &uhci_proc_operations;
    ent->size = 0;
    uhci->proc_entry = ent;
#endif

    uhci->fsbr = 0;
    uhci->fsbrtimeout = 0;

    spin_lock_init(&uhci->qh_remove_list_lock);
    INIT_LIST_HEAD(&uhci->qh_remove_list);

    spin_lock_init(&uhci->td_remove_list_lock);
    INIT_LIST_HEAD(&uhci->td_remove_list);

    spin_lock_init(&uhci->urb_remove_list_lock);
    INIT_LIST_HEAD(&uhci->urb_remove_list);

    spin_lock_init(&uhci->urb_list_lock);
    INIT_LIST_HEAD(&uhci->urb_list);

    spin_lock_init(&uhci->complete_list_lock);
    INIT_LIST_HEAD(&uhci->complete_list);

    spin_lock_init(&uhci->frame_list_lock);

    uhci->fl = pci_alloc_consistent(hcd->pdev, sizeof(*uhci->fl), &dma_handle);
    if (!uhci->fl) {
        err("unable to allocate consistent memory for frame list");
        goto err_alloc_fl;
    }

    memset((void *)uhci->fl, 0, sizeof(*uhci->fl));
    uhci->fl->dma_handle = dma_handle;

    uhci->td_pool = pci_pool_create("uhci_td", hcd->pdev,
        sizeof(struct uhci_td), 16, 0);
    if (!uhci->td_pool) {
        err("unable to create td pci_pool");
        goto err_create_td_pool;
    }

    uhci->qh_pool = pci_pool_create("uhci_qh", hcd->pdev,
        sizeof(struct uhci_qh), 16, 0);
    if (!uhci->qh_pool) {
        err("unable to create qh pci_pool");
        goto err_create_qh_pool;
    }

    /* Initialize the root hub */

    /* The UHCI spec says devices must have 2 ports, but goes on to say */
    /* they may have more but gives no way to determine how many they */
    /* have. However, according to the spec, Bit 7 of the port status */
    /* register is always set to 1, so we try to use this to our advantage */
    for (port = 0; port < (io_size - 0x10) / 2; port++) {
        unsigned int portstatus;

        portstatus = inw(uhci->io_addr + 0x10 + (port * 2));
        if (!(portstatus & 0x0080))
            break;
    }
    if (debug)
        info("detected %d ports", port);

    /* This is experimental so anything less than 2 or greater than 8 is */
    /* something weird and we'll ignore it */
    if (port < 2 || port > 8) {
        info("port count misdetected? forcing to 2 ports");
        port = 2;
    }

    uhci->rh_numports = port;

    hcd->self.root_hub = udev = usb_alloc_dev(NULL, &hcd->self);
    if (!udev) {
        err("unable to allocate root hub");
        goto err_alloc_root_hub;
    }

    uhci->term_td = uhci_alloc_td(uhci, udev);
    if (!uhci->term_td) {
        err("unable to allocate terminating TD");
        goto err_alloc_term_td;
    }

    for (i = 0; i < UHCI_NUM_SKELQH; i++) {
        uhci->skelqh[i] = uhci_alloc_qh(uhci, udev);
        if (!uhci->skelqh[i]) {
            err("unable to allocate QH %d", i);
            goto err_alloc_skelqh;
        }
    }

    /*
     * 8 Interrupt queues; link int2 to int1, int4 to int2, etc
     * then link int1 to control and control to bulk
     */
    uhci->skel_int128_qh->link = cpu_to_le32(uhci->skel_int64_qh->dma_handle) | UHCI_PTR_QH;
    uhci->skel_int64_qh->link = cpu_to_le32(uhci->skel_int32_qh->dma_handle) | UHCI_PTR_QH;
    uhci->skel_int32_qh->link = cpu_to_le32(uhci->skel_int16_qh->dma_handle) | UHCI_PTR_QH;
    uhci->skel_int16_qh->link = cpu_to_le32(uhci->skel_int8_qh->dma_handle) | UHCI_PTR_QH;
    uhci->skel_int8_qh->link = cpu_to_le32(uhci->skel_int4_qh->dma_handle) | UHCI_PTR_QH;
    uhci->skel_int4_qh->link = cpu_to_le32(uhci->skel_int2_qh->dma_handle) | UHCI_PTR_QH;
    uhci->skel_int2_qh->link = cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
    uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;

    uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_hs_control_qh->dma_handle) | UHCI_PTR_QH;
    uhci->skel_hs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
    uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;

    /* This dummy TD is to work around a bug in Intel PIIX controllers */
    uhci_fill_td(uhci->term_td, 0, (UHCI_NULL_DATA_SIZE << 21) |
        (0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
    uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);

    uhci->skel_term_qh->link = UHCI_PTR_TERM;
    uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);

    /*
     * Fill the frame list: make all entries point to
     * the proper interrupt queue.
     *
     * This is probably silly, but it's a simple way to
     * scatter the interrupt queues in a way that gives
     * us a reasonable dynamic range for irq latencies.
     */
    for (i = 0; i < UHCI_NUMFRAMES; i++) {
        int irq = 0;

        if (i & 1) {
            irq++;
            if (i & 2) {
                irq++;
                if (i & 4) {
                    irq++;
                    if (i & 8) {
                        irq++;
                        if (i & 16) {
                            irq++;
                            if (i & 32) {
                                irq++;
                                if (i & 64)
                                    irq++;
                            }
                        }
                    }
                }
            }
        }

        /* Only place we don't use the frame list routines */
        uhci->fl->frame[i] = cpu_to_le32(uhci->skelqh[7 - irq]->dma_handle);
    }
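    /*
     * Net effect of the nested tests above: irq counts the trailing
     * 1-bits of the frame number (capped at 7), so frame i enters the
     * skeleton at the queue of period 2^irq, assuming the usual skelqh
     * ordering from uhci-hcd.h (skelqh[0] = int128 ... skelqh[7] = int1).
     * For example, frames 3, 11, 19, ... (two trailing ones) start at the
     * 4 ms queue; since each queue links down to the faster ones, every
     * queue of period 2^k is still visited once every 2^k frames.
     */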

    start_hc(uhci);

    init_stall_timer(hcd);

    udev->speed = USB_SPEED_FULL;

    if (usb_register_root_hub(udev, &hcd->pdev->dev) != 0) {
        err("unable to start root hub");
        retval = -ENOMEM;
        goto err_start_root_hub;
    }

    return 0;

/*
 * error exits:
 */
err_start_root_hub:
    reset_hc(uhci);

    del_timer_sync(&uhci->stall_timer);

err_alloc_skelqh:
    for (i = 0; i < UHCI_NUM_SKELQH; i++)
        if (uhci->skelqh[i]) {
            uhci_free_qh(uhci, uhci->skelqh[i]);
            uhci->skelqh[i] = NULL;
        }

    uhci_free_td(uhci, uhci->term_td);
    uhci->term_td = NULL;

err_alloc_term_td:
    usb_put_dev(udev);
    hcd->self.root_hub = NULL;

err_alloc_root_hub:
    pci_pool_destroy(uhci->qh_pool);
    uhci->qh_pool = NULL;

err_create_qh_pool:
    pci_pool_destroy(uhci->td_pool);
    uhci->td_pool = NULL;

err_create_td_pool:
    pci_free_consistent(hcd->pdev, sizeof(*uhci->fl), uhci->fl, uhci->fl->dma_handle);
    uhci->fl = NULL;

err_alloc_fl:
#ifdef CONFIG_PROC_FS
    remove_proc_entry(hcd->self.bus_name, uhci_proc_root);
    uhci->proc_entry = NULL;

err_create_proc_entry:
#endif

    return retval;
}

static void uhci_stop(struct usb_hcd *hcd)
{
    struct uhci_hcd *uhci = hcd_to_uhci(hcd);

    del_timer_sync(&uhci->stall_timer);

    /*
     * At this point, we're guaranteed that no new connects can be made
     * to this bus since there are no more parents
     */
    uhci_free_pending_qhs(uhci);
    uhci_free_pending_tds(uhci);
    uhci_remove_pending_qhs(uhci);

    reset_hc(uhci);

    uhci_free_pending_qhs(uhci);
    uhci_free_pending_tds(uhci);

    release_uhci(uhci);
}

#ifdef CONFIG_PM
static int uhci_suspend(struct usb_hcd *hcd, u32 state)
{
    struct uhci_hcd *uhci = hcd_to_uhci(hcd);

    /* Don't try to suspend broken motherboards, reset instead */
    if (suspend_allowed(uhci))
        suspend_hc(uhci);
    else
        reset_hc(uhci);
    return 0;
}

static int uhci_resume(struct usb_hcd *hcd)
{
    struct uhci_hcd *uhci = hcd_to_uhci(hcd);

    pci_set_master(uhci->hcd.pdev);

    if (uhci->state == UHCI_SUSPENDED)
        uhci->resume_detect = 1;
    else {
        reset_hc(uhci);
        start_hc(uhci);
    }
    uhci->hcd.state = USB_STATE_RUNNING;
    return 0;
}
#endif

static struct usb_hcd *uhci_hcd_alloc(void)
{
    struct uhci_hcd *uhci;

    uhci = (struct uhci_hcd *)kmalloc(sizeof(*uhci), GFP_KERNEL);
    if (!uhci)
        return NULL;

    memset(uhci, 0, sizeof(*uhci));
    uhci->hcd.product_desc = "UHCI Host Controller";
    return &uhci->hcd;
}

static void uhci_hcd_free(struct usb_hcd *hcd)
{
    kfree(hcd_to_uhci(hcd));
}

static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
{
    return uhci_get_current_frame_number(hcd_to_uhci(hcd));
}

static const char hcd_name[] = "uhci_hcd";

static const struct hc_driver uhci_driver = {
    .description = hcd_name,

    /* Generic hardware linkage */
    .irq = uhci_irq,
    .flags = HCD_USB11,

    /* Basic lifecycle operations */
    .reset = uhci_reset,
    .start = uhci_start,
#ifdef CONFIG_PM
    .suspend = uhci_suspend,
    .resume = uhci_resume,
#endif
    .stop = uhci_stop,

    .hcd_alloc = uhci_hcd_alloc,
    .hcd_free = uhci_hcd_free,

    .urb_enqueue = uhci_urb_enqueue,
    .urb_dequeue = uhci_urb_dequeue,

    .get_frame_number = uhci_hcd_get_frame_number,

    .hub_status_data = uhci_hub_status_data,
    .hub_control = uhci_hub_control,
};

static const struct pci_device_id uhci_pci_ids[] = { {
    /* handle any USB UHCI controller */
    PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x00), ~0),
    .driver_data = (unsigned long) &uhci_driver,
    }, { /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, uhci_pci_ids);

static struct pci_driver uhci_pci_driver = {
    .name = (char *)hcd_name,
    .id_table = uhci_pci_ids,

    .probe = usb_hcd_pci_probe,
    .remove = usb_hcd_pci_remove,

#ifdef CONFIG_PM
    .suspend = usb_hcd_pci_suspend,
    .resume = usb_hcd_pci_resume,
#endif /* PM */
};

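/*
 * Module init: allocate the debug buffer, create the proc directory,
 * then register the PCI driver.  The urb_priv slab cache from the
 * stock driver is commented out in this port, so its error path is
 * effectively a no-op.
 */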
/*static*/ int __init uhci_hcd_init(void)
{
    int retval = -ENOMEM;

    info(DRIVER_DESC " " DRIVER_VERSION);

    if (usb_disabled())
        return -ENODEV;

    if (debug) {
        errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
        if (!errbuf)
            goto errbuf_failed;
    }

#ifdef CONFIG_PROC_FS
    uhci_proc_root = create_proc_entry("driver/uhci", S_IFDIR, 0);
    if (!uhci_proc_root)
        goto proc_failed;
#endif

//**    uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
//**        sizeof(struct urb_priv), 0, 0, NULL, NULL);
//**    if (!uhci_up_cachep)
//**        goto up_failed;

    retval = pci_module_init(&uhci_pci_driver);
    if (retval)
        goto init_failed;

    return 0;

init_failed:
//**    if (kmem_cache_destroy(uhci_up_cachep))
//**        printk(KERN_INFO "uhci: not all urb_priv's were freed\n");

up_failed:

#ifdef CONFIG_PROC_FS
    remove_proc_entry("driver/uhci", 0);

proc_failed:
#endif
    if (errbuf)
        kfree(errbuf);

errbuf_failed:

    return retval;
}

/*static*/ void __exit uhci_hcd_cleanup(void)
{
    pci_unregister_driver(&uhci_pci_driver);

//**    if (kmem_cache_destroy(uhci_up_cachep))
//**        printk(KERN_INFO "uhci: not all urb_priv's were freed\n");

#ifdef CONFIG_PROC_FS
    remove_proc_entry("driver/uhci", 0);
#endif

    if (errbuf)
        kfree(errbuf);
}

module_init(uhci_hcd_init);
module_exit(uhci_hcd_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");