// SPDX-License-Identifier: GPL-2.0+
/*
 * USB HOST XHCI Controller stack
 *
 * Based on xHCI host controller driver in linux-kernel
 * by Sarah Sharp.
 *
 * Copyright (C) 2008 Intel Corp.
 * Author: Sarah Sharp
 *
 * Copyright (C) 2013 Samsung Electronics Co.Ltd
 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
 *	    Vikas Sajjan <vikas.sajjan@samsung.com>
 */

#include <cpu_func.h>
#include <log.h>
#include <asm/byteorder.h>
#include <usb.h>
#include <asm/unaligned.h>
#include <linux/bug.h>
#include <linux/errno.h>

#include <usb/xhci.h>

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
				union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

/**
 * Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment? I.e. would the updated event TRB pointer step off the end of the
 * event segment?
 *
 * @param ctrl	Host controller data structure
 * @param ring	pointer to the ring
 * @param seg	pointer to the segment to which TRB belongs
 * @param trb	pointer to the ring trb
 * Return: 1 if this TRB is a link TRB else 0
 */
static int last_trb(struct xhci_ctrl *ctrl, struct xhci_ring *ring,
		    struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == ctrl->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return TRB_TYPE_LINK_LE32(trb->link.control);
}

/**
 * Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 *
 * @param ctrl	Host controller data structure
 * @param ring	pointer to the ring
 * @param seg	pointer to the segment to which TRB belongs
 * @param trb	pointer to the ring trb
 * Return: 1 if this TRB is the last TRB on the last segment else 0
 */
static bool last_trb_on_last_seg(struct xhci_ctrl *ctrl,
				 struct xhci_ring *ring,
				 struct xhci_segment *seg,
				 union xhci_trb *trb)
{
	if (ring == ctrl->event_ring)
		return ((trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == ring->first_seg));
	else
		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/**
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set. This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @param ctrl	Host controller data structure
 * @param ring	pointer to the ring
 * @param more_trbs_coming	flag to indicate whether more trbs
 *				are expected or NOT.
 *				Will you enqueue more TRBs before calling
 *				prepare_ring()?
 * Return: none
 */
static void inc_enq(struct xhci_ctrl *ctrl, struct xhci_ring *ring,
		    bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	next = ++(ring->enqueue);

	/*
	 * Update the dequeue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBS)
	 */
	while (last_trb(ctrl, ring, ring->enq_seg, next)) {
		if (ring != ctrl->event_ring) {
			/*
			 * If the caller doesn't plan on enqueueing more
			 * TDs before ringing the doorbell, then we
			 * don't want to give the link TRB to the
			 * hardware just yet. We'll give the link TRB
			 * back in prepare_ring() just before we enqueue
			 * the TD at the top of the ring.
			 */
			if (!chain && !more_trbs_coming)
				break;

			/*
			 * If we're not dealing with 0.95 hardware or
			 * isoc rings on AMD 0.96 host,
			 * carry over the chain bit of the previous TRB
			 * (which may mean the chain bit is cleared).
			 */
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);

			next->link.control ^= cpu_to_le32(TRB_CYCLE);
			xhci_flush_cache((uintptr_t)next,
					 sizeof(union xhci_trb));
		}
		/* Toggle the cycle bit after the last ring segment. */
		if (last_trb_on_last_seg(ctrl, ring,
					 ring->enq_seg, next))
			ring->cycle_state = (ring->cycle_state ? 0 : 1);

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}

/**
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * @param ctrl	Host controller data structure
 * @param ring	Ring whose Dequeue TRB pointer needs to be incremented.
 * Return: none
 */
static void inc_deq(struct xhci_ctrl *ctrl, struct xhci_ring *ring)
{
	do {
		/*
		 * Update the dequeue pointer further if that was a link TRB or
		 * we're at the end of an event ring segment (which doesn't have
		 * link TRBS)
		 */
		if (last_trb(ctrl, ring, ring->deq_seg, ring->dequeue)) {
			if (ring == ctrl->event_ring &&
			    last_trb_on_last_seg(ctrl, ring,
						 ring->deq_seg, ring->dequeue)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
			}
			ring->deq_seg = ring->deq_seg->next;
			ring->dequeue = ring->deq_seg->trbs;
		} else {
			ring->dequeue++;
		}
	} while (last_trb(ctrl, ring, ring->deq_seg, ring->dequeue));
}

/**
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @param ctrl	Host controller data structure
 * @param ring	pointer to the ring
 * @param more_trbs_coming	flag to indicate whether more TRBs will be
 *				enqueued before calling prepare_ring()
 * @param trb_fields	pointer to trb field array containing TRB contents
 * Return: DMA address of the enqueued TRB
 */
static dma_addr_t queue_trb(struct xhci_ctrl *ctrl, struct xhci_ring *ring,
			    bool more_trbs_coming, unsigned int *trb_fields)
{
	struct xhci_generic_trb *trb;
	dma_addr_t addr;
	int i;

	trb = &ring->enqueue->generic;

	for (i = 0; i < 4; i++)
		trb->field[i] = cpu_to_le32(trb_fields[i]);

	xhci_flush_cache((uintptr_t)trb, sizeof(struct xhci_generic_trb));

	addr = xhci_trb_virt_to_dma(ring->enq_seg, (union xhci_trb *)trb);

	inc_enq(ctrl, ring, more_trbs_coming);

	return addr;
}

/**
 * Does various checks on the endpoint ring, and makes it ready
 * to queue num_trbs.
 *
 * @param ctrl		Host controller data structure
 * @param ep_ring	pointer to the EP Transfer Ring
 * @param ep_state	State of the End Point
 * Return: error code in case of invalid ep_state, 0 on success
 */
static int prepare_ring(struct xhci_ctrl *ctrl, struct xhci_ring *ep_ring,
			u32 ep_state)
{
	union xhci_trb *next = ep_ring->enqueue;

	/* Make sure the endpoint has been added to xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		puts("WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		puts("WARN waiting for error on ep to be cleared\n");
		return -EINVAL;
	case EP_STATE_HALTED:
		puts("WARN endpoint is halted\n");
		return -EINVAL;
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		debug("EP STATE RUNNING.\n");
		break;
	default:
		puts("ERROR unknown endpoint state for ep\n");
		return -EINVAL;
	}

	while (last_trb(ctrl, ep_ring, ep_ring->enq_seg, next)) {
		/*
		 * If we're not dealing with 0.95 hardware or isoc rings
		 * on AMD 0.96 host, clear the chain bit.
		 */
		next->link.control &= cpu_to_le32(~TRB_CHAIN);

		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		xhci_flush_cache((uintptr_t)next, sizeof(union xhci_trb));

		/* Toggle the cycle bit after the last ring segment. */
		if (last_trb_on_last_seg(ctrl, ep_ring,
					 ep_ring->enq_seg, next))
			ep_ring->cycle_state = (ep_ring->cycle_state ? 0 : 1);
		ep_ring->enq_seg = ep_ring->enq_seg->next;
		ep_ring->enqueue = ep_ring->enq_seg->trbs;
		next = ep_ring->enqueue;
	}

	return 0;
}

/**
 * Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 *
 * @param ctrl		Host controller data structure
 * @param addr		DMA address to write in the first two fields (opt.)
 * @param slot_id	Slot ID to encode in the flags field (opt.)
 * @param ep_index	Endpoint index to encode in the flags field (opt.)
 * @param cmd		Command type to enqueue
 * Return: none
 */
void xhci_queue_command(struct xhci_ctrl *ctrl, dma_addr_t addr, u32 slot_id,
			u32 ep_index, trb_type cmd)
{
	u32 fields[4];

	BUG_ON(prepare_ring(ctrl, ctrl->cmd_ring, EP_STATE_RUNNING));

	fields[0] = lower_32_bits(addr);
	fields[1] = upper_32_bits(addr);
	fields[2] = 0;
	fields[3] = TRB_TYPE(cmd) | SLOT_ID_FOR_TRB(slot_id) |
		    ctrl->cmd_ring->cycle_state;

	/*
	 * Only 'reset endpoint', 'stop endpoint' and 'set TR dequeue pointer'
	 * commands need endpoint id encoded.
	 */
	if (cmd >= TRB_RESET_EP && cmd <= TRB_SET_DEQ)
		fields[3] |= EP_ID_FOR_TRB(ep_index);

	queue_trb(ctrl, ctrl->cmd_ring, false, fields);

	/* Ring the command ring doorbell */
	xhci_writel(&ctrl->dba->doorbell[0], DB_VALUE_HOST);
}

/*
 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
 * packets remaining in the TD (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
 *    DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *    rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * For xHCI 0.96 and older, the TD size field should be the remaining bytes
 * including this TRB, right shifted by 10.
 *
 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
 * This is taken care of in the TRB_TD_SIZE() macro.
 *
 * The last TRB in a TD must have the TD size set to zero.
 *
 * @param ctrl	host controller data structure
 * @param transferred	total size sent so far
 * @param trb_buff_len	length of the TRB Buffer
 * @param td_total_len	total transfer length of the TD in bytes
 * @param maxp	max packet size of current pipe
 * @param more_trbs_coming	flag indicating whether more TRBs follow in this TD
 * Return: remainder (TD size for this TRB)
 */
static u32 xhci_td_remainder(struct xhci_ctrl *ctrl, int transferred,
			     int trb_buff_len, unsigned int td_total_len,
			     int maxp, bool more_trbs_coming)
{
	u32 total_packet_count;

	/* MTK xHCI 0.96 contains some features from 1.0 */
	if (ctrl->hci_version < 0x100 && !(ctrl->quirks & XHCI_MTK_HOST))
		return ((td_total_len - transferred) >> 10);

	/* One TRB with a zero-length data packet. */
	if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
	    trb_buff_len == td_total_len)
		return 0;

	/* for MTK xHCI 0.96, TD size include this TRB, but not in 1.x */
	if ((ctrl->quirks & XHCI_MTK_HOST) && (ctrl->hci_version < 0x100))
		trb_buff_len = 0;

	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);

	/* Queueing functions don't count the current TRB into transferred */
	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
}

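/*
 * Worked example for xhci_td_remainder() above (illustrative values, not
 * tied to any particular device): on an xHCI 1.0 host, a 3072-byte TD with
 * maxp = 512 split into a 1024-byte TRB followed by a 2048-byte TRB gives
 * total_packet_count = DIV_ROUND_UP(3072, 512) = 6. For the first TRB
 * (transferred = 0, trb_buff_len = 1024, more_trbs_coming = true) the
 * function returns 6 - (1024 / 512) = 4 packets still to go; for the final
 * TRB more_trbs_coming is false, so it returns 0 as required. On a plain
 * 0.96 host the first TRB would instead yield (3072 - 0) >> 10 = 3, i.e.
 * the remaining bytes in 1KB units.
 */
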
/**
 * Ring the doorbell of the End Point
 *
 * @param udev		pointer to the USB device structure
 * @param ep_index	index of the endpoint
 * @param start_cycle	cycle flag of the first TRB
 * @param start_trb	pointer to the first TRB
 * Return: none
 */
static void giveback_first_trb(struct usb_device *udev, int ep_index,
			       int start_cycle,
			       struct xhci_generic_trb *start_trb)
{
	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);

	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

	xhci_flush_cache((uintptr_t)start_trb, sizeof(struct xhci_generic_trb));

	/* Ringing EP doorbell here */
	xhci_writel(&ctrl->dba->doorbell[udev->slot_id],
		    DB_VALUE(ep_index, 0));

	return;
}

/**** POLLING mechanism for XHCI ****/

/**
 * Finalizes a handled event TRB by advancing our dequeue pointer and giving
 * the TRB back to the hardware for recycling. Must call this exactly once at
 * the end of each event handler, and not touch the TRB again afterwards.
 *
 * @param ctrl	Host controller data structure
 * Return: none
 */
void xhci_acknowledge_event(struct xhci_ctrl *ctrl)
{
	dma_addr_t deq;

	/* Advance our dequeue pointer to the next event */
	inc_deq(ctrl, ctrl->event_ring);

	/* Inform the hardware */
	deq = xhci_trb_virt_to_dma(ctrl->event_ring->deq_seg,
				   ctrl->event_ring->dequeue);
	xhci_writeq(&ctrl->ir_set->erst_dequeue, deq | ERST_EHB);
}

/**
 * Checks if there is a new event to handle on the event ring.
 *
 * @param ctrl	Host controller data structure
 * Return: 1 if an event is ready to be handled, 0 otherwise
 */
static int event_ready(struct xhci_ctrl *ctrl)
{
	union xhci_trb *event;

	xhci_inval_cache((uintptr_t)ctrl->event_ring->dequeue,
			 sizeof(union xhci_trb));

	event = ctrl->event_ring->dequeue;

	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
	    ctrl->event_ring->cycle_state)
		return 0;

	return 1;
}

/**
 * Waits for a specific type of event and returns it. Discards unexpected
 * events. Caller *must* call xhci_acknowledge_event() after it is finished
 * processing the event, and must not access the returned pointer afterwards.
 *
 * @param ctrl		Host controller data structure
 * @param expected	TRB type expected from Event TRB
 * Return: pointer to event trb
 */
union xhci_trb *xhci_wait_for_event(struct xhci_ctrl *ctrl, trb_type expected)
{
	trb_type type;
	unsigned long ts = get_timer(0);

	do {
		union xhci_trb *event = ctrl->event_ring->dequeue;

		if (!event_ready(ctrl))
			continue;

		type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
		if (type == expected ||
		    (expected == TRB_NONE && type != TRB_PORT_STATUS))
			return event;

		if (type == TRB_PORT_STATUS)
			/* TODO: remove this once enumeration has been reworked */
			/*
			 * Port status change events always have a
			 * successful completion code
			 */
			BUG_ON(GET_COMP_CODE(
				le32_to_cpu(event->generic.field[2])) !=
							COMP_SUCCESS);
		else
			printf("Unexpected XHCI event TRB, skipping... "
				"(%08x %08x %08x %08x)\n",
				le32_to_cpu(event->generic.field[0]),
				le32_to_cpu(event->generic.field[1]),
				le32_to_cpu(event->generic.field[2]),
				le32_to_cpu(event->generic.field[3]));

		xhci_acknowledge_event(ctrl);
	} while (get_timer(ts) < XHCI_TIMEOUT);

	if (expected == TRB_TRANSFER)
		return NULL;

	printf("XHCI timeout on event type %d...\n", expected);

	return NULL;
}

/*
 * Send reset endpoint command for given endpoint. This recovers from a
 * halted endpoint (e.g. due to a stall error).
 */
static void reset_ep(struct usb_device *udev, int ep_index)
{
	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
	struct xhci_ring *ring = ctrl->devs[udev->slot_id]->eps[ep_index].ring;
	union xhci_trb *event;
	u64 addr;
	u32 field;

	printf("Resetting EP %d...\n", ep_index);
	xhci_queue_command(ctrl, 0, udev->slot_id, ep_index, TRB_RESET_EP);
	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
	if (!event)
		return;

	field = le32_to_cpu(event->trans_event.flags);
	BUG_ON(TRB_TO_SLOT_ID(field) != udev->slot_id);
	xhci_acknowledge_event(ctrl);

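	/*
	 * The Set TR Dequeue Pointer command carries the Dequeue Cycle State
	 * in bit 0 of the pointer field, which is why the ring's cycle_state
	 * is OR'ed into the enqueue address below.
	 */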
	addr = xhci_trb_virt_to_dma(ring->enq_seg,
			(void *)((uintptr_t)ring->enqueue | ring->cycle_state));
	xhci_queue_command(ctrl, addr, udev->slot_id, ep_index, TRB_SET_DEQ);
	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
	if (!event)
		return;

	BUG_ON(TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags))
		!= udev->slot_id || GET_COMP_CODE(le32_to_cpu(
		event->event_cmd.status)) != COMP_SUCCESS);
	xhci_acknowledge_event(ctrl);
}

/*
 * Stops transfer processing for an endpoint and throws away all unprocessed
 * TRBs by setting the xHC's dequeue pointer to our enqueue pointer. The next
 * xhci_bulk_tx/xhci_ctrl_tx on this endpoint will add new transfers there and
 * ring the doorbell, causing this endpoint to start working again.
 * (Careful: This will BUG() when there was no transfer in progress. Shouldn't
 * happen in practice for current uses and is too complicated to fix right now.)
 */
static void abort_td(struct usb_device *udev, int ep_index)
{
	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
	struct xhci_ring *ring = ctrl->devs[udev->slot_id]->eps[ep_index].ring;
	union xhci_trb *event;
	xhci_comp_code comp;
	trb_type type;
	u64 addr;
	u32 field;

	xhci_queue_command(ctrl, 0, udev->slot_id, ep_index, TRB_STOP_RING);

	event = xhci_wait_for_event(ctrl, TRB_NONE);
	if (!event)
		return;

	type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
	if (type == TRB_TRANSFER) {
		field = le32_to_cpu(event->trans_event.flags);
		BUG_ON(TRB_TO_SLOT_ID(field) != udev->slot_id);
		BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
		BUG_ON(GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))
			!= COMP_STOP);
		xhci_acknowledge_event(ctrl);

		event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
		if (!event)
			return;
		type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));

	} else {
		printf("abort_td: Expected a TRB_TRANSFER TRB first\n");
	}

	comp = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status));
	BUG_ON(type != TRB_COMPLETION ||
		TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags))
		!= udev->slot_id || (comp != COMP_SUCCESS && comp
		!= COMP_CTX_STATE));
	xhci_acknowledge_event(ctrl);

	addr = xhci_trb_virt_to_dma(ring->enq_seg,
			(void *)((uintptr_t)ring->enqueue | ring->cycle_state));
	xhci_queue_command(ctrl, addr, udev->slot_id, ep_index, TRB_SET_DEQ);
	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
	if (!event)
		return;

	BUG_ON(TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags))
		!= udev->slot_id || GET_COMP_CODE(le32_to_cpu(
		event->event_cmd.status)) != COMP_SUCCESS);
	xhci_acknowledge_event(ctrl);
}

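/**
 * Records the result of a completed transfer: sets udev->act_len from the
 * untransferred-length field of the transfer event and maps the completion
 * code to a legacy USB status value in udev->status.
 *
 * @param udev		pointer to the USB device structure
 * @param event		pointer to the transfer event TRB
 * @param length	requested length of the transfer
 * Return: none
 */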
static void record_transfer_result(struct usb_device *udev,
				   union xhci_trb *event, int length)
{
	udev->act_len = min(length, length -
		(int)EVENT_TRB_LEN(le32_to_cpu(event->trans_event.transfer_len)));

	switch (GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))) {
	case COMP_SUCCESS:
		BUG_ON(udev->act_len != length);
		/* fallthrough */
	case COMP_SHORT_TX:
		udev->status = 0;
		break;
	case COMP_STALL:
		udev->status = USB_ST_STALLED;
		break;
	case COMP_DB_ERR:
	case COMP_TRB_ERR:
		udev->status = USB_ST_BUF_ERR;
		break;
	case COMP_BABBLE:
		udev->status = USB_ST_BABBLE_DET;
		break;
	default:
		udev->status = 0x80; /* USB_ST_TOO_LAZY_TO_MAKE_A_NEW_MACRO */
	}
}

/**** Bulk and Control transfer methods ****/
/**
 * Queues up the BULK Request
 *
 * @param udev		pointer to the USB device structure
 * @param pipe		contains the DIR_IN or OUT, devnum
 * @param length	length of the buffer
 * @param buffer	buffer to be read/written based on the request
 * Return: returns 0 if successful else -1 on failure
 */
int xhci_bulk_tx(struct usb_device *udev, unsigned long pipe,
		 int length, void *buffer)
{
	int num_trbs = 0;
	struct xhci_generic_trb *start_trb;
	bool first_trb = false;
	int start_cycle;
	u32 field = 0;
	u32 length_field = 0;
	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
	int slot_id = udev->slot_id;
	int ep_index;
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ring;		/* EP transfer ring */
	union xhci_trb *event;

	int running_total, trb_buff_len;
	bool more_trbs_coming = true;
	int maxpacketsize;
	u64 addr;
	int ret;
	u32 trb_fields[4];
	u64 buf_64 = xhci_dma_map(ctrl, buffer, length);
	dma_addr_t last_transfer_trb_addr;
	int available_length;

	debug("dev=%p, pipe=%lx, buffer=%p, length=%d\n",
	      udev, pipe, buffer, length);

	available_length = length;
	ep_index = usb_pipe_ep_index(pipe);
	virt_dev = ctrl->devs[slot_id];

	xhci_inval_cache((uintptr_t)virt_dev->out_ctx->bytes,
			 virt_dev->out_ctx->size);

	ep_ctx = xhci_get_ep_ctx(ctrl, virt_dev->out_ctx, ep_index);

	/*
	 * If the endpoint was halted due to a prior error, resume it before
	 * the next transfer. It is the responsibility of the upper layer to
	 * have dealt with whatever caused the error.
	 */
	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == EP_STATE_HALTED)
		reset_ep(udev, ep_index);

	ring = virt_dev->eps[ep_index].ring;
	if (!ring)
		return -EINVAL;

	/*
	 * How much data is (potentially) left before the 64KB boundary?
	 * The xHCI spec (Table 49 and section 6.4.1) requires that a TRB
	 * buffer does not span a 64KB boundary; if it would, we send the
	 * request in more than one TRB by chaining them.
	 */
	running_total = TRB_MAX_BUFF_SIZE -
			(lower_32_bits(buf_64) & (TRB_MAX_BUFF_SIZE - 1));
	trb_buff_len = running_total;
	running_total &= TRB_MAX_BUFF_SIZE - 1;

	/*
	 * If there's some data on this 64KB chunk, or we have to send a
	 * zero-length transfer, we need at least one TRB
	 */
	if (running_total != 0 || length == 0)
		num_trbs++;

	/* How many more 64KB chunks to transfer, how many more TRBs? */
	while (running_total < length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}

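	/*
	 * Illustrative example of the TRB count above (made-up addresses):
	 * with TRB_MAX_BUFF_SIZE of 64KB, a 12KB buffer mapped at DMA address
	 * 0x1f000 has 4KB left before the 0x20000 boundary, so the first TRB
	 * carries 4KB and one more TRB covers the remaining 8KB, giving
	 * num_trbs = 2. A buffer starting exactly on a 64KB boundary skips
	 * the first increment and gets one TRB per (possibly partial) 64KB
	 * chunk instead, plus one TRB for a zero-length transfer.
	 */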
	/*
	 * XXX: Calling routine prepare_ring() called in place of
	 * prepare_transfer() as there in 'Linux' since we are not
	 * maintaining multiple TDs/transfer at the same time.
	 */
	ret = prepare_ring(ctrl, ring,
			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK);
	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ring->enqueue->generic;
	start_cycle = ring->cycle_state;

	running_total = 0;
	maxpacketsize = usb_maxpacket(udev, pipe);

	/*
	 * How much data goes into the first TRB? At most the bytes left
	 * before the next 64KB boundary, as computed above, since a TRB
	 * buffer must not span a 64KB boundary.
	 */
	addr = buf_64;

	if (trb_buff_len > length)
		trb_buff_len = length;

	first_trb = true;

	/* flush the buffer before use */
	xhci_flush_cache((uintptr_t)buffer, length);

	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 remainder = 0;
		field = 0;
		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= TRB_CYCLE;
		} else {
			field |= ring->cycle_state;
		}

		/*
		 * Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			field |= TRB_IOC;
			more_trbs_coming = false;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_pipein(pipe))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		remainder = xhci_td_remainder(ctrl, running_total, trb_buff_len,
					      length, maxpacketsize,
					      more_trbs_coming);

		length_field = (TRB_LEN(trb_buff_len) |
				TRB_TD_SIZE(remainder) |
				TRB_INTR_TARGET(0));

		trb_fields[0] = lower_32_bits(addr);
		trb_fields[1] = upper_32_bits(addr);
		trb_fields[2] = length_field;
		trb_fields[3] = field | TRB_TYPE(TRB_NORMAL);

		last_transfer_trb_addr = queue_trb(ctrl, ring, (num_trbs > 1), trb_fields);

		--num_trbs;

		running_total += trb_buff_len;

		/* Calculate length for next transfer */
		addr += trb_buff_len;
		trb_buff_len = min((length - running_total), TRB_MAX_BUFF_SIZE);
	} while (running_total < length);

	giveback_first_trb(udev, ep_index, start_cycle, start_trb);

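	/*
	 * The controller may generate more than one transfer event for this
	 * TD: an intermediate TRB with TRB_ISP set can report a short packet
	 * before the event for the last TRB arrives. If the event below does
	 * not point at the last TRB we queued, subtract the untransferred
	 * length it reports and wait for the next event.
	 */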
again:
	event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
	if (!event) {
		debug("XHCI bulk transfer timed out, aborting...\n");
		abort_td(udev, ep_index);
		udev->status = USB_ST_NAK_REC;  /* closest thing to a timeout */
		udev->act_len = 0;
		return -ETIMEDOUT;
	}

	if ((uintptr_t)(le64_to_cpu(event->trans_event.buffer)) !=
	    (uintptr_t)last_transfer_trb_addr) {
		available_length -=
			(int)EVENT_TRB_LEN(le32_to_cpu(event->trans_event.transfer_len));
		xhci_acknowledge_event(ctrl);
		goto again;
	}

	field = le32_to_cpu(event->trans_event.flags);
	BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
	BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);

	record_transfer_result(udev, event, available_length);
	xhci_acknowledge_event(ctrl);
	xhci_inval_cache((uintptr_t)buffer, length);
	xhci_dma_unmap(ctrl, buf_64, length);

	return (udev->status != USB_ST_NOT_PROC) ? 0 : -1;
}

/**
 * Queues up the Control Transfer Request
 *
 * @param udev		pointer to the USB device structure
 * @param pipe		contains the DIR_IN or OUT, devnum
 * @param req		request type
 * @param length	length of the buffer
 * @param buffer	buffer to be read/written based on the request
 * Return: returns 0 if successful else error code on failure
 */
int xhci_ctrl_tx(struct usb_device *udev, unsigned long pipe,
		 struct devrequest *req, int length,
		 void *buffer)
{
	int ret;
	int start_cycle;
	int num_trbs;
	u32 field;
	u32 length_field;
	u64 buf_64 = 0;
	struct xhci_generic_trb *start_trb;
	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
	int slot_id = udev->slot_id;
	int ep_index;
	u32 trb_fields[4];
	struct xhci_virt_device *virt_dev = ctrl->devs[slot_id];
	struct xhci_ring *ep_ring;
	union xhci_trb *event;
	u32 remainder;

	debug("req=%u (%#x), type=%u (%#x), value=%u (%#x), index=%u\n",
	      req->request, req->request,
	      req->requesttype, req->requesttype,
	      le16_to_cpu(req->value), le16_to_cpu(req->value),
	      le16_to_cpu(req->index));

	ep_index = usb_pipe_ep_index(pipe);

	ep_ring = virt_dev->eps[ep_index].ring;
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Check to see if the max packet size for the default control
	 * endpoint changed during FS device enumeration
	 */
	if (udev->speed == USB_SPEED_FULL) {
		ret = xhci_check_maxpacket(udev);
		if (ret < 0)
			return ret;
	}

	xhci_inval_cache((uintptr_t)virt_dev->out_ctx->bytes,
			 virt_dev->out_ctx->size);

	struct xhci_ep_ctx *ep_ctx = NULL;
	ep_ctx = xhci_get_ep_ctx(ctrl, virt_dev->out_ctx, ep_index);

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */

	if (length > 0)
		num_trbs++;
	/*
	 * XXX: Calling routine prepare_ring() called in place of
	 * prepare_transfer() as there in 'Linux' since we are not
	 * maintaining multiple TDs/transfer at the same time.
	 */
	ret = prepare_ring(ctrl, ep_ring,
			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK);

	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	debug("start_trb %p, start_cycle %d\n", start_trb, start_cycle);

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
	if (ctrl->hci_version >= 0x100 || ctrl->quirks & XHCI_MTK_HOST) {
		if (length > 0) {
			if (req->requesttype & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	debug("req->requesttype = %d, req->request = %d, req->value = %d, req->index = %d, req->length = %d\n",
	      req->requesttype, req->request, le16_to_cpu(req->value),
	      le16_to_cpu(req->index), le16_to_cpu(req->length));

	trb_fields[0] = req->requesttype | req->request << 8 |
			le16_to_cpu(req->value) << 16;
	trb_fields[1] = le16_to_cpu(req->index) |
			le16_to_cpu(req->length) << 16;
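	/*
	 * Example of the packing above (hypothetical request): a standard
	 * GET_DESCRIPTOR(DEVICE) setup packet with bmRequestType 0x80,
	 * bRequest 0x06, wValue 0x0100, wIndex 0 and wLength 18 becomes
	 * trb_fields[0] = 0x01000680 and trb_fields[1] = 0x00120000, i.e. the
	 * 8-byte setup packet laid out little-endian across the two words.
	 */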
	/* TRB_LEN | (TRB_INTR_TARGET) */
	trb_fields[2] = (TRB_LEN(8) | TRB_INTR_TARGET(0));
	/* Immediate data in pointer */
	trb_fields[3] = field;
	queue_trb(ctrl, ep_ring, true, trb_fields);

	/* Re-initializing field to zero */
	field = 0;
	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_pipein(pipe))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	remainder = xhci_td_remainder(ctrl, 0, length, length,
				      usb_maxpacket(udev, pipe), true);
	length_field = TRB_LEN(length) | TRB_TD_SIZE(remainder) |
		       TRB_INTR_TARGET(0);
	debug("length_field = %d, length = %d,"
	      "xhci_td_remainder(length) = %d , TRB_INTR_TARGET(0) = %d\n",
	      length_field, TRB_LEN(length),
	      TRB_TD_SIZE(remainder), 0);

	if (length > 0) {
		if (req->requesttype & USB_DIR_IN)
			field |= TRB_DIR_IN;
		buf_64 = xhci_dma_map(ctrl, buffer, length);

		trb_fields[0] = lower_32_bits(buf_64);
		trb_fields[1] = upper_32_bits(buf_64);
		trb_fields[2] = length_field;
		trb_fields[3] = field | ep_ring->cycle_state;

		xhci_flush_cache((uintptr_t)buffer, length);
		queue_trb(ctrl, ep_ring, true, trb_fields);
	}

	/*
	 * Queue status TRB -
	 * see Table 7 and sections 4.11.2.2 and 6.4.1.2.3
	 */

	/* If the device sent data, the status stage is an OUT transfer */
	field = 0;
	if (length > 0 && req->requesttype & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;

	trb_fields[0] = 0;
	trb_fields[1] = 0;
	trb_fields[2] = TRB_INTR_TARGET(0);
	/* Event on completion */
	trb_fields[3] = field | TRB_IOC |
			TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state;

	queue_trb(ctrl, ep_ring, false, trb_fields);

	giveback_first_trb(udev, ep_index, start_cycle, start_trb);

	event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
	if (!event)
		goto abort;
	field = le32_to_cpu(event->trans_event.flags);

	BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
	BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);

	record_transfer_result(udev, event, length);
	xhci_acknowledge_event(ctrl);
	if (udev->status == USB_ST_STALLED) {
		reset_ep(udev, ep_index);
		return -EPIPE;
	}

	/* Invalidate buffer to make it available to usb-core */
	if (length > 0) {
		xhci_inval_cache((uintptr_t)buffer, length);
		xhci_dma_unmap(ctrl, buf_64, length);
	}

	if (GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))
			== COMP_SHORT_TX) {
		/* Short data stage, clear up additional status stage event */
		event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
		if (!event)
			goto abort;
		BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
		BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
		xhci_acknowledge_event(ctrl);
	}

	return (udev->status != USB_ST_NOT_PROC) ? 0 : -1;

abort:
	debug("XHCI control transfer timed out, aborting...\n");
	abort_td(udev, ep_index);
	udev->status = USB_ST_NAK_REC;
	udev->act_len = 0;
	return -ETIMEDOUT;
}