/*
 * Copyright (c) 2001-2002 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 * We get some funky API restrictions from the current URB model, which
 * works notably better for reading transfers than for writing.  (And
 * which accordingly needs to change before it'll work inside devices,
 * or with "USB On The Go" additions to USB 2.0 ...)
 */
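
/*
 * Layout sketch (as used throughout this file): ehci->periodic is a
 * frame-indexed array of hardware link words, each heading a list of
 * QH/FSTN/ITD/SITD records for that frame.  The hardware words hold
 * only bus addresses plus a Q_TYPE_* tag, so ehci->pshadow mirrors the
 * same lists with CPU pointers; both must be kept in sync, with the
 * hardware-visible update done last.
 */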

static int ehci_get_frame (struct usb_hcd *hcd);

/*-------------------------------------------------------------------------*/

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow (union ehci_shadow *periodic, int tag)
{
	switch (tag) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
#ifdef have_split_iso
	case Q_TYPE_SITD:
		return &periodic->sitd->sitd_next;
#endif /* have_split_iso */
	}
	dbg ("BAD shadow %p tag %d", periodic->ptr, tag);
	// BUG ();
	return 0;
}
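
/*
 * Note: union ehci_shadow overlays one CPU pointer per record type, so
 * the Q_TYPE_* tag from the matching hardware link word is what decides
 * which member (and which "next" field) is valid at each step.
 */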

/* returns true after successful unlink */
/* caller must hold ehci->lock */
static int periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow [frame];
	u32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	here = *prev_p;
	union ehci_shadow	*next_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
		/* ".qh" is safe for any type: hw_next is first in each record */
		hw_p = &here.qh->hw_next;
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr) {
		dbg ("entry %p no longer on frame [%d]", ptr, frame);
		return 0;
	}
	// vdbg ("periodic unlink %p from frame %d", ptr, frame);

	/* update hardware list ... HC may still know the old structure, so
	 * don't change hw_next until it'll have purged its cache
	 */
	next_p = periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
	*hw_p = here.qh->hw_next;

	/* unlink from shadow list; HCD won't see old structure again */
	*prev_p = *next_p;
	next_p->ptr = 0;

	return 1;
}

/* how many of the uframe's 125 usecs are allocated? */
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
	u32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	*q = &ehci->pshadow [frame];
	unsigned		usecs = 0;
#ifdef have_split_iso
	u32			temp;	/* for Q_TYPE_SITD below */
#endif

	while (q->ptr) {
		switch (Q_NEXT_TYPE (*hw_p)) {
		case Q_TYPE_QH:
			/* is it in the S-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe))
				usecs += q->qh->usecs;
			/* ... or C-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			/* advance hw_p too, so the type tag tracks q */
			hw_p = &q->qh->hw_next;
			q = &q->qh->qh_next;
			break;
		case Q_TYPE_FSTN:
			/* for "save place" FSTNs, count the relevant INTR
			 * bandwidth from the previous frame
			 */
			if (q->fstn->hw_prev != EHCI_LIST_END) {
				dbg ("not counting FSTN bandwidth yet ...");
			}
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
			break;
		case Q_TYPE_ITD:
			/* NOTE the "one uframe per itd" policy */
			if (q->itd->hw_transaction [uframe] != 0)
				usecs += q->itd->usecs;
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			break;
#ifdef have_split_iso
		case Q_TYPE_SITD:
			temp = q->sitd->hw_fullspeed_ep &
				__constant_cpu_to_le32 (1 << 31);

			// FIXME:  this doesn't count data bytes right...

			/* is it in the S-mask?  (count SPLIT, DATA) */
			if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) {
				if (temp)
					usecs += HS_USECS (188);
				else
					usecs += HS_USECS (1);
			}

			/* ... C-mask?  (count CSPLIT, DATA) */
			if (q->sitd->hw_uframe &
					cpu_to_le32 (1 << (8 + uframe))) {
				if (temp)
					usecs += HS_USECS (0);
				else
					usecs += HS_USECS (188);
			}
			hw_p = &q->sitd->hw_next;
			q = &q->sitd->sitd_next;
			break;
#endif /* have_split_iso */
		default:
			BUG ();
		}
	}
#ifdef DEBUG
	if (usecs > 100)
		err ("overallocated uframe %d, periodic is %d usecs",
			frame * 8 + uframe, usecs);
#endif
	return usecs;
}
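
/*
 * For reference: each uframe is 125 usecs, and the EHCI spec caps
 * periodic transfers at 80% of that.  That 100 usec budget is what the
 * DEBUG check above and check_period() below both measure against.
 */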

/*-------------------------------------------------------------------------*/

static int enable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	/* did clearing PSE take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake (&ehci->regs->status, STS_PSS, 0, 9 * 125);
	if (status != 0) {
		ehci->hcd.state = USB_STATE_HALT;
		return status;
	}

	cmd = readl (&ehci->regs->command) | CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* posted write ... PSS happens later */
	ehci->hcd.state = USB_STATE_RUNNING;

	/* make sure ehci_work scans these */
	ehci->next_uframe = readl (&ehci->regs->frame_index)
				% (ehci->periodic_size << 3);
	return 0;
}

static int disable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	/* did setting PSE not take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake (&ehci->regs->status, STS_PSS, STS_PSS, 9 * 125);
	if (status != 0) {
		ehci->hcd.state = USB_STATE_HALT;
		return status;
	}

	cmd = readl (&ehci->regs->command) & ~CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* posted write ... */

	ehci->next_uframe = -1;
	return 0;
}
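
/*
 * On the handshake() calls above: CMD_PSE is only a request; STS_PSS
 * reports what the controller is actually doing, and the two converge
 * only at a frame boundary.  The 9 * 125 usec timeout allows slightly
 * more than one full frame for that to happen.
 */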

/*-------------------------------------------------------------------------*/

// FIXME microframe periods not yet handled

static void intr_deschedule (
	struct ehci_hcd	*ehci,
	struct ehci_qh	*qh,
	int		wait
) {
	int		status;
	unsigned	frame = qh->start;

	do {
		periodic_unlink (ehci, frame, qh);
		qh_put (ehci, qh);
		frame += qh->period;
	} while (frame < ehci->periodic_size);

	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = 0;
	ehci->periodic_sched--;

	/* maybe turn off periodic schedule */
	if (!ehci->periodic_sched)
		status = disable_periodic (ehci);
	else {
		status = 0;
		vdbg ("periodic schedule still enabled");
	}

	/*
	 * If the hc may be looking at this qh, then delay a uframe
	 * (yeech!) to be sure it's done.
	 * No other threads may be mucking with this qh.
	 */
	if (((ehci_get_frame (&ehci->hcd) - frame) % qh->period) == 0) {
		if (wait) {
			udelay (125);
			qh->hw_next = EHCI_LIST_END;
		} else {
			/* we may not be IDLE yet, but if the qh is empty
			 * the race is very short.  then if qh also isn't
			 * rescheduled soon, it won't matter.  otherwise...
			 */
			vdbg ("intr_deschedule...");
		}
	} else
		qh->hw_next = EHCI_LIST_END;

	qh->qh_state = QH_STATE_IDLE;

	/* update per-qh bandwidth utilization (for usbfs) */
	hcd_to_bus (&ehci->hcd)->bandwidth_allocated -=
		(qh->usecs + qh->c_usecs) / qh->period;

	dbg ("descheduled qh %p, period = %d frame = %d count = %d, urbs = %d",
		qh, qh->period, frame,
		atomic_read (&qh->refcount), ehci->periodic_sched);
}
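
/*
 * About the timing test in intr_deschedule(): qh->period is in frames,
 * so the qh was linked at "frame" modulo that period.  When the current
 * frame lands on that sequence the HC may hold a cached copy of the qh,
 * hence the one-uframe udelay (125) before hw_next is rewritten.
 */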

static int check_period (
	struct ehci_hcd	*ehci,
	unsigned	frame,
	unsigned	uframe,
	unsigned	period,
	unsigned	usecs
) {
	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/*
	 * 80% periodic == 100 usec/uframe available
	 * convert "usecs we need" to "max already claimed"
	 */
	usecs = 100 - usecs;

	do {
		int	claimed;

// FIXME delete when intr_submit handles non-empty queues
// this gives us a one intr/frame limit (vs N/uframe)
// ... and also lets us avoid tracking split transactions
// that might collide at a given TT/hub.
		if (ehci->pshadow [frame].ptr)
			return 0;

		claimed = periodic_usecs (ehci, frame, uframe);
		if (claimed > usecs)
			return 0;

// FIXME update to handle sub-frame periods
	} while ((frame += period) < ehci->periodic_size);

	// success!
	return 1;
}
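
/*
 * Worked example: with period 4 and a 1024-entry schedule, check_period()
 * probes the given uframe of frames f, f+4, f+8, ... f+1020, and fails
 * as soon as any of them already has more than (100 - usecs) usecs
 * claimed.
 */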

static int check_intr_schedule (
	struct ehci_hcd		*ehci,
	unsigned		frame,
	unsigned		uframe,
	const struct ehci_qh	*qh,
	u32			*c_maskp
)
{
	int	retval = -ENOSPC;

	if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
		goto done;
	if (!qh->c_usecs) {
		retval = 0;
		*c_maskp = cpu_to_le32 (0);
		goto done;
	}

	/* This is a split transaction; check the bandwidth available for
	 * the completion too.  Check both worst and best case gaps: worst
	 * case is SPLIT near uframe end, and CSPLIT near start ... best is
	 * vice versa.  Difference can be almost two uframe times, but we
	 * reserve unnecessary bandwidth (waste it) this way.  (Actually
	 * even better cases exist, like immediate device NAK.)
	 *
	 * FIXME don't even bother unless we know this TT is idle in that
	 * range of uframes ... for now, check_period() allows only one
	 * interrupt transfer per frame, so needn't check "TT busy" status
	 * when scheduling a split (QH, SITD, or FSTN).
	 *
	 * FIXME ehci 0.96 and above can use FSTNs
	 */
	if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
			qh->period, qh->c_usecs))
		goto done;
	if (!check_period (ehci, frame, uframe + qh->gap_uf,
			qh->period, qh->c_usecs))
		goto done;

	*c_maskp = cpu_to_le32 (0x03 << (8 + uframe + qh->gap_uf));
	retval = 0;
done:
	return retval;
}
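
/*
 * hw_info2 layout note (per the masks used above): bits 7..0 hold the
 * S-mask of start-split uframes, bits 15..8 the C-mask of complete-split
 * uframes.  The 0x03 written by check_intr_schedule() thus marks two
 * consecutive CSPLIT uframes, beginning qh->gap_uf after the SSPLIT.
 */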

static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		status;
	unsigned	uframe;
	u32		c_mask;
	unsigned	frame;		/* 0..(qh->period - 1), or NO_FRAME */

	qh->hw_next = EHCI_LIST_END;
	frame = qh->start;

	/* reuse the previous schedule slots, if we can */
	if (frame < qh->period) {
		/* note: ffs() is 1-based, hence the pre-decrement */
		uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff);
		status = check_intr_schedule (ehci, frame, --uframe,
				qh, &c_mask);
	} else {
		uframe = 0;
		c_mask = 0;
		status = -ENOSPC;
	}

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	if (status) {
		frame = qh->period - 1;
		do {
			for (uframe = 0; uframe < 8; uframe++) {
				status = check_intr_schedule (ehci,
						frame, uframe, qh,
						&c_mask);
				if (status == 0)
					break;
			}
		} while (status && frame--);
		if (status)
			goto done;
		qh->start = frame;

		/* reset S-frame and (maybe) C-frame masks */
		qh->hw_info2 &= ~__constant_cpu_to_le32 (0xffff);
		qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
	} else
		dbg ("reused previous qh %p schedule", qh);

	/* stuff into the periodic schedule */
	qh->qh_state = QH_STATE_LINKED;
	dbg ("scheduled qh %p usecs %d/%d period %d.0 starting %d.%d (gap %d)",
		qh, qh->usecs, qh->c_usecs,
		qh->period, frame, uframe, qh->gap_uf);
	do {
		if (unlikely (ehci->pshadow [frame].ptr != 0)) {

// FIXME -- just link toward the end, before any qh with a shorter period,
// AND accommodate it already having been linked here (after some other qh)
// AS WELL AS updating the schedule checking logic

			BUG ();
		} else {
			ehci->pshadow [frame].qh = qh_get (qh);
			ehci->periodic [frame] =
				QH_NEXT (qh->qh_dma);
		}
		wmb ();
		frame += qh->period;
	} while (frame < ehci->periodic_size);

	/* update per-qh bandwidth for usbfs */
	hcd_to_bus (&ehci->hcd)->bandwidth_allocated +=
		(qh->usecs + qh->c_usecs) / qh->period;

	/* maybe enable periodic schedule processing */
	if (!ehci->periodic_sched++)
		status = enable_periodic (ehci);
done:
	return status;
}
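
/*
 * Note on the "reuse" test in qh_schedule(): qh->start stays at the
 * NO_FRAME sentinel (out of range for any legal period) until the qh
 * has been scheduled once, so "frame < qh->period" holds only for a qh
 * whose previous slots can be reused.
 */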

static int intr_submit (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			mem_flags
) {
	unsigned		epnum;
	unsigned long		flags;
	struct ehci_qh		*qh;
	struct hcd_dev		*dev;
	int			is_input;
	int			status = 0;
	struct list_head	empty;

	/* get endpoint and transfer/schedule data */
	epnum = usb_pipeendpoint (urb->pipe);
	is_input = usb_pipein (urb->pipe);
	if (is_input)
		epnum |= 0x10;

	spin_lock_irqsave (&ehci->lock, flags);
	dev = (struct hcd_dev *)urb->dev->hcpriv;

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD (&empty);
	qh = qh_append_tds (ehci, urb, &empty, epnum, &dev->ep [epnum]);
	if (qh == 0) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		if ((status = qh_schedule (ehci, qh)) != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
	BUG_ON (qh == 0);

	/* ... update usbfs periodic stats */
	hcd_to_bus (&ehci->hcd)->bandwidth_int_reqs++;

done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (status)
		qtd_list_free (ehci, urb, qtd_list);

	return status;
}

static unsigned
intr_complete (
	struct ehci_hcd	*ehci,
	unsigned	frame,
	struct ehci_qh	*qh,
	struct pt_regs	*regs
) {
	unsigned	count;

	/* nothing to report? */
	if (likely ((qh->hw_token & __constant_cpu_to_le32 (QTD_STS_ACTIVE))
			!= 0))
		return 0;
	if (unlikely (list_empty (&qh->qtd_list))) {
		dbg ("intr qh %p no TDs?", qh);
		return 0;
	}

	/* handle any completions */
	count = qh_completions (ehci, qh, regs);

	if (unlikely (list_empty (&qh->qtd_list)))
		intr_deschedule (ehci, qh, 0);

	return count;
}

/*-------------------------------------------------------------------------*/

static void
itd_free_list (struct ehci_hcd *ehci, struct urb *urb)
{
	struct ehci_itd	*first_itd = urb->hcpriv;

	while (!list_empty (&first_itd->itd_list)) {
		struct ehci_itd	*itd;

		itd = list_entry (
			first_itd->itd_list.next,
			struct ehci_itd, itd_list);
		list_del (&itd->itd_list);
		pci_pool_free (ehci->itd_pool, itd, itd->itd_dma);
	}
	pci_pool_free (ehci->itd_pool, first_itd, first_itd->itd_dma);
	urb->hcpriv = 0;
}

static int
itd_fill (
	struct ehci_hcd	*ehci,
	struct ehci_itd	*itd,
	struct urb	*urb,
	unsigned	index,		// urb->iso_frame_desc [index]
	dma_addr_t	dma		// mapped transfer buffer
) {
	u64		temp;
	u32		buf1;
	unsigned	i, epnum, maxp, multi;
	unsigned	length;
	int		is_input;

	itd->hw_next = EHCI_LIST_END;
	itd->urb = urb;
	itd->index = index;

	/* tell itd about its transfer buffer, max 2 pages */
	length = urb->iso_frame_desc [index].length;
	dma += urb->iso_frame_desc [index].offset;
	temp = dma & ~0x0fff;
	for (i = 0; i < 2; i++) {
		itd->hw_bufp [i] = cpu_to_le32 ((u32) temp);
		itd->hw_bufp_hi [i] = cpu_to_le32 ((u32)(temp >> 32));
		temp += 0x1000;
	}
	itd->buf_dma = dma;

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's maxpacket field
	 */
	epnum = usb_pipeendpoint (urb->pipe);
	is_input = usb_pipein (urb->pipe);
	if (is_input) {
		maxp = urb->dev->epmaxpacketin [epnum];
		buf1 = (1 << 11);
	} else {
		maxp = urb->dev->epmaxpacketout [epnum];
		buf1 = 0;
	}
	/* base packet size occupies bits 10..0 (so up to 1024 bytes) */
	buf1 |= (maxp & 0x07ff);
	multi = 1;
	multi += (maxp >> 11) & 0x03;
	maxp &= 0x07ff;
	maxp *= multi;

	/* transfer can't fit in any uframe? (length is unsigned) */
	if (maxp < length) {
		dbg ("BAD iso packet: %d bytes, max %d, urb %p [%d] (of %d)",
			length, maxp, urb, index,
			urb->iso_frame_desc [index].length);
		return -ENOSPC;
	}
	itd->usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 1, length);

	/* "plus" info in low order bits of buffer pointers */
	itd->hw_bufp [0] |= cpu_to_le32 ((epnum << 8) | urb->dev->devnum);
	itd->hw_bufp [1] |= cpu_to_le32 (buf1);
	itd->hw_bufp [2] |= cpu_to_le32 (multi);

	/* figure hw_transaction[] value (it's scheduled later) */
	itd->transaction = EHCI_ISOC_ACTIVE;
	itd->transaction |= dma & 0x0fff;	/* offset; buffer=0 */
	if ((index + 1) == urb->number_of_packets)
		itd->transaction |= EHCI_ITD_IOC;	/* end-of-urb irq */
	itd->transaction |= length << 16;
	cpu_to_le32s (&itd->transaction);

	return 0;
}
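
/*
 * For reference, the endpoint descriptor's maxpacket field decoded
 * above packs the base packet size into bits 10..0 and up to two extra
 * "high bandwidth" transactions per uframe into bits 12..11; e.g.
 * 0x0c00 decodes as multi = 2, maxp = 1024, so 2048 bytes per uframe.
 */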

static int
itd_urb_transaction (
	struct ehci_hcd	*ehci,
	struct urb	*urb,
	int		mem_flags
) {
	int		frame_index;
	struct ehci_itd	*first_itd, *itd;
	int		status;
	dma_addr_t	itd_dma;

	/* allocate/init ITDs */
	for (frame_index = 0, first_itd = 0;
			frame_index < urb->number_of_packets;
			frame_index++) {
		itd = pci_pool_alloc_usb (ehci->itd_pool, mem_flags, &itd_dma);
		if (!itd) {
			status = -ENOMEM;
			goto fail;
		}
		memset (itd, 0, sizeof *itd);
		itd->itd_dma = itd_dma;

		status = itd_fill (ehci, itd, urb, frame_index,
				urb->transfer_dma);
		if (status != 0)
			goto fail;

		if (first_itd)
			list_add_tail (&itd->itd_list,
					&first_itd->itd_list);
		else {
			INIT_LIST_HEAD (&itd->itd_list);
			urb->hcpriv = first_itd = itd;
		}
	}
	urb->error_count = 0;
	return 0;

fail:
	if (urb->hcpriv)
		itd_free_list (ehci, urb);
	return status;
}
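
/*
 * List convention used above and in itd_free_list(): the first ITD is
 * stashed in urb->hcpriv and doubles as the list head, and every later
 * ITD is chained onto first_itd->itd_list in packet order.
 */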

/*-------------------------------------------------------------------------*/

static inline void
itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
	/* always prepend ITD/SITD ... only QH tree is order-sensitive */
	itd->itd_next = ehci->pshadow [frame];
	itd->hw_next = ehci->periodic [frame];
	ehci->pshadow [frame].itd = itd;
	ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD;
}

/*
 * return zero on success, else -errno
 * - start holds first uframe to start scheduling into
 * - max is the first uframe it's NOT (!) OK to start scheduling into
 * math to be done modulo "mod" (ehci->periodic_size << 3)
 */
static int get_iso_range (
	struct ehci_hcd	*ehci,
	struct urb	*urb,
	unsigned	*start,
	unsigned	*max,
	unsigned	mod
) {
	struct list_head	*lh;
	struct hcd_dev		*dev = urb->dev->hcpriv;
	int			last = -1;
	unsigned		now, span, end;

	span = urb->interval * urb->number_of_packets;

	/* first see if we know when the next transfer SHOULD happen */
	list_for_each (lh, &dev->urb_list) {
		struct urb	*u;
		struct ehci_itd	*itd;
		unsigned	s;

		u = list_entry (lh, struct urb, urb_list);
		if (u == urb || u->pipe != urb->pipe)
			continue;
		if (u->interval != urb->interval) {	/* must not change! */
			dbg ("urb %p interval %d ... != %p interval %d",
				u, u->interval, urb, urb->interval);
			return -EINVAL;
		}

		/* URB for this endpoint... covers through when? */
		itd = u->hcpriv;	/* the queued urb "u", not ours */
		s = itd->uframe + u->interval * u->number_of_packets;
		if (last < 0)
			last = s;
		else {
			/*
			 * So far we can only queue two ISO URBs...
			 *
			 * FIXME do interval math, figure out whether
			 * this URB is "before" or not ... also, handle
			 * the case where the URB might have completed,
			 * but hasn't yet been processed.
			 */
			dbg ("NYET: queue >2 URBs per ISO endpoint");
			return -EDOM;
		}
	}

	/* calculate the legal range [start,max) */
	now = readl (&ehci->regs->frame_index) + 1;	/* next uframe */
	if (!ehci->periodic_sched)
		now += 8;				/* startup delay */
	now %= mod;
	end = now + mod;
	if (last < 0) {
		*start = now + ehci->i_thresh + /* paranoia */ 1;
		*max = end - span;
		if (*max < *start + 1)
			*max = *start + 1;
	} else {
		*start = last % mod;
		*max = (last + 1) % mod;
	}

	/* explicit start frame? */
	if (!(urb->transfer_flags & URB_ISO_ASAP)) {
		unsigned	temp;

		/* sanity check: must be in range */
		urb->start_frame %= ehci->periodic_size;
		temp = urb->start_frame << 3;
		if (temp < *start)
			temp += mod;
		if (temp > *max)
			return -EDOM;

		/* use that explicit start frame */
		*start = urb->start_frame << 3;
		temp += 8;
		if (temp < *max)
			*max = temp;
	}

	// FIXME minimize wraparound to "now" ... insist max+span
	// (and start+span) remains a few frames short of "end"

	*max %= ehci->periodic_size;
	if ((*start + span) < end)
		return 0;
	return -EFBIG;
}
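
/*
 * Units note: get_iso_range() works in uframes throughout, with start
 * and max taken modulo mod = ehci->periodic_size << 3, while
 * urb->start_frame is in frames ... hence the << 3 conversions above.
 */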

static int
itd_schedule (struct ehci_hcd *ehci, struct urb *urb)
{
	unsigned	start, max, i;
	int		status;
	unsigned	mod = ehci->periodic_size << 3;

	for (i = 0; i < urb->number_of_packets; i++) {
		urb->iso_frame_desc [i].status = -EINPROGRESS;
		urb->iso_frame_desc [i].actual_length = 0;
	}

	if ((status = get_iso_range (ehci, urb, &start, &max, mod)) != 0)
		return status;

	do {
		unsigned	uframe;
		unsigned	usecs;
		struct ehci_itd	*itd;

		/* check schedule: enough space? */
		itd = urb->hcpriv;
		for (i = 0, uframe = start;
				i < urb->number_of_packets;
				i++, uframe += urb->interval) {
			uframe %= mod;

			/* can't commit more than 80% periodic == 100 usec */
			if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
					> (100 - itd->usecs)) {
				itd = 0;
				break;
			}
			itd = list_entry (itd->itd_list.next,
					struct ehci_itd, itd_list);
		}
		if (!itd)
			continue;

		/* that's where we'll schedule this! */
		itd = urb->hcpriv;
		urb->start_frame = start >> 3;
		vdbg ("ISO urb %p (%d packets period %d) starting %d.%d",
			urb, urb->number_of_packets, urb->interval,
			urb->start_frame, start & 0x7);
		for (i = 0, uframe = start, usecs = 0;
				i < urb->number_of_packets;
				i++, uframe += urb->interval) {
			uframe %= mod;

			itd->uframe = uframe;
			itd->hw_transaction [uframe & 0x07] = itd->transaction;
			itd_link (ehci, (uframe >> 3) % ehci->periodic_size,
				itd);
			wmb ();
			usecs += itd->usecs;

			itd = list_entry (itd->itd_list.next,
					struct ehci_itd, itd_list);
		}

		/* update bandwidth utilization records (for usbfs)
		 *
		 * FIXME This claims each URB queued to an endpoint, as if
		 * transfers were concurrent, not sequential.  So bandwidth
		 * typically gets double-billed ... comes from tying it to
		 * URBs rather than endpoints in the schedule.  Luckily we
		 * don't use this usbfs data for serious decision making.
		 */
		usecs /= urb->number_of_packets;
		usecs /= urb->interval;
		usecs >>= 3;
		if (usecs < 1)
			usecs = 1;
		usb_claim_bandwidth (urb->dev, urb, usecs, 1);

		/* maybe enable periodic schedule processing */
		if (!ehci->periodic_sched++) {
			if ((status = enable_periodic (ehci)) != 0) {
				// FIXME deschedule right away
				err ("itd_schedule, enable = %d", status);
			}
		}

		return 0;

	} while ((start = (start + 1) % mod) != max);

	/* no room in the schedule */
	dbg ("urb %p, CAN'T SCHEDULE", urb);
	return -ENOSPC;
}
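
/*
 * Scheduling strategy above: try each candidate start uframe in
 * [start, max) and take the first where every packet's uframe still
 * fits under the 100 usec periodic budget; nothing already on the
 * schedule is moved to make room.
 */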

/*-------------------------------------------------------------------------*/

#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)

static unsigned
itd_complete (
	struct ehci_hcd	*ehci,
	struct ehci_itd	*itd,
	unsigned	uframe,
	struct pt_regs	*regs
) {
	struct urb				*urb = itd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;

	/* update status for this uframe's transfers */
	desc = &urb->iso_frame_desc [itd->index];

	t = itd->hw_transaction [uframe];
	itd->hw_transaction [uframe] = 0;
	if (t & EHCI_ISOC_ACTIVE)
		desc->status = -EXDEV;
	else if (t & ISO_ERRS) {
		urb->error_count++;
		if (t & EHCI_ISOC_BUF_ERR)
			desc->status = usb_pipein (urb->pipe)
				? -ENOSR	/* couldn't read */
				: -ECOMM;	/* couldn't write */
		else if (t & EHCI_ISOC_BABBLE)
			desc->status = -EOVERFLOW;
		else	/* (t & EHCI_ISOC_XACTERR) */
			desc->status = -EPROTO;

		/* HC need not update length with this error */
		if (!(t & EHCI_ISOC_BABBLE))
			desc->actual_length += EHCI_ITD_LENGTH (t);
	} else {
		desc->status = 0;
		desc->actual_length += EHCI_ITD_LENGTH (t);
	}

	vdbg ("itd %p urb %p packet %d/%d trans %x status %d len %d",
		itd, urb, itd->index + 1, urb->number_of_packets,
		t, desc->status, desc->actual_length);

	/* handle completion now? */
	if ((itd->index + 1) != urb->number_of_packets)
		return 0;

	/*
	 * Always give the urb back to the driver ... expect it to submit
	 * a new urb (or resubmit this), and to have another already queued
	 * when un-interrupted transfers are needed.
	 *
	 * NOTE that for now we don't accelerate ISO unlinks; they just
	 * happen according to the current schedule.  Means a delay of
	 * up to about a second (max).
	 */
	itd_free_list (ehci, urb);
	if (urb->status == -EINPROGRESS)
		urb->status = 0;

	/* complete() can reenter this HCD */
	spin_unlock (&ehci->lock);
	usb_hcd_giveback_urb (&ehci->hcd, urb, regs);
	spin_lock (&ehci->lock);

	/* defer stopping schedule; completion can submit */
	ehci->periodic_sched--;
	if (!ehci->periodic_sched)
		(void) disable_periodic (ehci);

	return 1;
}

/*-------------------------------------------------------------------------*/

static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
{
	int		status;
	unsigned long	flags;

	dbg ("itd_submit urb %p", urb);

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction (ehci, urb, mem_flags);
	if (status < 0)
		return status;

	/* schedule ... need to lock */
	spin_lock_irqsave (&ehci->lock, flags);
	status = itd_schedule (ehci, urb);
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (status < 0)
		itd_free_list (ehci, urb);

	return status;
}

#ifdef have_split_iso

/*-------------------------------------------------------------------------*/

/*
 * "Split ISO TDs" ... used for USB 1.1 devices going through
 * the TTs in USB 2.0 hubs.
 *
 * FIXME not yet implemented
 */

#endif /* have_split_iso */

/*-------------------------------------------------------------------------*/

static void
scan_periodic (struct ehci_hcd *ehci, struct pt_regs *regs)
{
	unsigned	frame, clock, now_uframe, mod;
	unsigned	count = 0;

	mod = ehci->periodic_size << 3;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 * Don't scan ISO entries more than once, though.
	 */
	frame = ehci->next_uframe >> 3;
	if (HCD_IS_RUNNING (ehci->hcd.state))
		now_uframe = readl (&ehci->regs->frame_index);
	else
		now_uframe = (frame << 3) - 1;
	now_uframe %= mod;
	clock = now_uframe >> 3;

	for (;;) {
		union ehci_shadow	q, *q_p;
		u32			type, *hw_p;
		unsigned		uframes;

restart:
		/* scan schedule to _before_ current frame index */
		if (frame == clock)
			uframes = now_uframe & 0x07;
		else
			uframes = 8;

		q_p = &ehci->pshadow [frame];
		hw_p = &ehci->periodic [frame];
		q.ptr = q_p->ptr;
		type = Q_NEXT_TYPE (*hw_p);

		/* scan each element in frame's queue for completions */
		while (q.ptr != 0) {
			int			last;
			unsigned		uf;
			union ehci_shadow	temp;

			switch (type) {
			case Q_TYPE_QH:
				last = (q.qh->hw_next == EHCI_LIST_END);
				temp = q.qh->qh_next;
				type = Q_NEXT_TYPE (q.qh->hw_next);
				count += intr_complete (ehci, frame,
						qh_get (q.qh), regs);
				qh_put (ehci, q.qh);
				q = temp;
				break;
			case Q_TYPE_FSTN:
				last = (q.fstn->hw_next == EHCI_LIST_END);
				/* for "save place" FSTNs, look at QH entries
				 * in the previous frame for completions.
				 */
				if (q.fstn->hw_prev != EHCI_LIST_END) {
					dbg ("ignoring completions from FSTNs");
				}
				type = Q_NEXT_TYPE (q.fstn->hw_next);
				q = q.fstn->fstn_next;
				break;
			case Q_TYPE_ITD:
				last = (q.itd->hw_next == EHCI_LIST_END);

				/* Unlink each (S)ITD we see, since the ISO
				 * URB model forces constant rescheduling.
				 * That complicates sharing uframes in ITDs,
				 * and means we need to skip uframes the HC
				 * hasn't yet processed.
				 */
				for (uf = 0; uf < uframes; uf++) {
					if (q.itd->hw_transaction [uf] != 0) {
						temp = q;
						*q_p = q.itd->itd_next;
						*hw_p = q.itd->hw_next;
						type = Q_NEXT_TYPE (*hw_p);

						/* might free q.itd ... */
						count += itd_complete (ehci,
							temp.itd, uf, regs);
						break;
					}
				}
				/* we might skip this ITD's uframe ... */
				if (uf == uframes) {
					q_p = &q.itd->itd_next;
					hw_p = &q.itd->hw_next;
					type = Q_NEXT_TYPE (q.itd->hw_next);
				}

				q = *q_p;
				break;
#ifdef have_split_iso
			case Q_TYPE_SITD:
				last = (q.sitd->hw_next == EHCI_LIST_END);
				sitd_complete (ehci, q.sitd);
				type = Q_NEXT_TYPE (q.sitd->hw_next);

				// FIXME unlink SITD after split completes
				q = q.sitd->sitd_next;
				break;
#endif /* have_split_iso */
			default:
				dbg ("corrupt type %d frame %d shadow %p",
					type, frame, q.ptr);
				// BUG ();
				last = 1;
				q.ptr = 0;
			}

			/* did completion remove an interior q entry? */
			if (unlikely (q.ptr == 0 && !last))
				goto restart;
		}

		/* stop when we catch up to the HC */

		// FIXME:  this assumes we won't get lapped when
		// latencies climb; that should be rare, but...
		// detect it, and just go all the way around.
		// FLR might help detect this case, so long as latencies
		// don't exceed periodic_size msec (default 1.024 sec).

		// FIXME:  likewise assumes HC doesn't halt mid-scan

		if (frame == clock) {
			unsigned	now;

			if (!HCD_IS_RUNNING (ehci->hcd.state))
				break;
			ehci->next_uframe = now_uframe;
			now = readl (&ehci->regs->frame_index) % mod;
			if (now_uframe == now)
				break;

			/* rescan the rest of this frame, then ... */
			now_uframe = now;
			clock = now_uframe >> 3;
		} else
			frame = (frame + 1) % ehci->periodic_size;
	}
}