// SPDX-License-Identifier: GPL-2.0+
/*
 * USB HOST XHCI Controller stack
 *
 * Based on xHCI host controller driver in linux-kernel
 * by Sarah Sharp.
 *
 * Copyright (C) 2008 Intel Corp.
 * Author: Sarah Sharp
 *
 * Copyright (C) 2013 Samsung Electronics Co.Ltd
 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
 *	    Vikas Sajjan <vikas.sajjan@samsung.com>
 */

#include <cpu_func.h>
#include <log.h>
#include <asm/byteorder.h>
#include <usb.h>
#include <watchdog.h>
#include <asm/unaligned.h>
#include <linux/bug.h>
#include <linux/errno.h>

#include <usb/xhci.h>

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
				union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}
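
/*
 * Illustrative example (hypothetical numbers): with seg->dma = 0x80000000 and
 * trb = &seg->trbs[5], segment_offset is 5, so the function returns
 * 0x80000000 + 5 * sizeof(union xhci_trb), i.e. 0x80000050 for 16-byte TRBs.
 */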

/**
 * Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment? I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 *
 * @param ctrl	Host controller data structure
 * @param ring	pointer to the ring
 * @param seg	pointer to the segment to which TRB belongs
 * @param trb	pointer to the ring TRB
 * Return: 1 if this TRB is a link TRB, else 0
 */
static int last_trb(struct xhci_ctrl *ctrl, struct xhci_ring *ring,
		    struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == ctrl->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return TRB_TYPE_LINK_LE32(trb->link.control);
}

/**
 * Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 *
 * @param ctrl	Host controller data structure
 * @param ring	pointer to the ring
 * @param seg	pointer to the segment to which TRB belongs
 * @param trb	pointer to the ring TRB
 * Return: 1 if this TRB is the last TRB on the last segment else 0
 */
static bool last_trb_on_last_seg(struct xhci_ctrl *ctrl,
				 struct xhci_ring *ring,
				 struct xhci_segment *seg,
				 union xhci_trb *trb)
{
	if (ring == ctrl->event_ring)
		return ((trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == ring->first_seg));
	else
		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/**
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set. This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @param ctrl	Host controller data structure
 * @param ring	pointer to the ring
 * @param more_trbs_coming	flag to indicate whether more TRBs are
 *				expected, i.e. will the caller enqueue more
 *				TRBs before calling prepare_ring()?
 * Return: none
 */
static void inc_enq(struct xhci_ctrl *ctrl, struct xhci_ring *ring,
		    bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	next = ++(ring->enqueue);

	/*
	 * Update the enqueue pointer further if that was a link TRB or we're
	 * at the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(ctrl, ring, ring->enq_seg, next)) {
		if (ring != ctrl->event_ring) {
			/*
			 * If the caller doesn't plan on enqueueing more
			 * TDs before ringing the doorbell, then we
			 * don't want to give the link TRB to the
			 * hardware just yet. We'll give the link TRB
			 * back in prepare_ring() just before we enqueue
			 * the TD at the top of the ring.
			 */
			if (!chain && !more_trbs_coming)
				break;

			/*
			 * If we're not dealing with 0.95 hardware or
			 * isoc rings on AMD 0.96 host,
			 * carry over the chain bit of the previous TRB
			 * (which may mean the chain bit is cleared).
			 */
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);

			next->link.control ^= cpu_to_le32(TRB_CYCLE);
			xhci_flush_cache((uintptr_t)next,
					 sizeof(union xhci_trb));
		}
		/* Toggle the cycle bit after the last ring segment. */
		if (last_trb_on_last_seg(ctrl, ring,
					 ring->enq_seg, next))
			ring->cycle_state = (ring->cycle_state ? 0 : 1);

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}

/**
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * @param ctrl	Host controller data structure
 * @param ring	Ring whose Dequeue TRB pointer needs to be incremented.
 * Return: none
 */
static void inc_deq(struct xhci_ctrl *ctrl, struct xhci_ring *ring)
{
	do {
		/*
		 * Update the dequeue pointer further if that was a link TRB or
		 * we're at the end of an event ring segment (which doesn't have
		 * link TRBs)
		 */
		if (last_trb(ctrl, ring, ring->deq_seg, ring->dequeue)) {
			if (ring == ctrl->event_ring &&
			    last_trb_on_last_seg(ctrl, ring,
						 ring->deq_seg, ring->dequeue)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
			}
			ring->deq_seg = ring->deq_seg->next;
			ring->dequeue = ring->deq_seg->trbs;
		} else {
			ring->dequeue++;
		}
	} while (last_trb(ctrl, ring, ring->deq_seg, ring->dequeue));
}

/**
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @param ctrl	Host controller data structure
 * @param ring	pointer to the ring
 * @param more_trbs_coming	flag to indicate whether more TRBs will be
 *				enqueued before calling prepare_ring()
 * @param trb_fields	pointer to trb field array containing TRB contents
 * Return: DMA address of the enqueued TRB
 */
static dma_addr_t queue_trb(struct xhci_ctrl *ctrl, struct xhci_ring *ring,
			    bool more_trbs_coming, unsigned int *trb_fields)
{
	struct xhci_generic_trb *trb;
	dma_addr_t addr;
	int i;

	trb = &ring->enqueue->generic;

	for (i = 0; i < 4; i++)
		trb->field[i] = cpu_to_le32(trb_fields[i]);

	xhci_flush_cache((uintptr_t)trb, sizeof(struct xhci_generic_trb));

	addr = xhci_trb_virt_to_dma(ring->enq_seg, (union xhci_trb *)trb);

	inc_enq(ctrl, ring, more_trbs_coming);

	return addr;
}
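
/*
 * Example caller (sketch, simplified from xhci_queue_command() below): the
 * four 32-bit TRB words are filled into a local array and handed over:
 *
 *	fields[0] = lower_32_bits(addr);
 *	fields[1] = upper_32_bits(addr);
 *	fields[2] = 0;
 *	fields[3] = TRB_TYPE(cmd) | ctrl->cmd_ring->cycle_state;
 *	queue_trb(ctrl, ctrl->cmd_ring, false, fields);
 */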

/**
 * Does various checks on the endpoint ring, and makes it ready
 * to queue num_trbs.
 *
 * @param ctrl		Host controller data structure
 * @param ep_ring	pointer to the EP Transfer Ring
 * @param ep_state	State of the End Point
 * Return: error code in case of invalid ep_state, 0 on success
 */
static int prepare_ring(struct xhci_ctrl *ctrl, struct xhci_ring *ep_ring,
			u32 ep_state)
{
	union xhci_trb *next = ep_ring->enqueue;

	/* Make sure the endpoint has been added to xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		puts("WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		puts("WARN waiting for error on ep to be cleared\n");
		return -EINVAL;
	case EP_STATE_HALTED:
		puts("WARN endpoint is halted\n");
		return -EINVAL;
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		debug("EP STATE RUNNING.\n");
		break;
	default:
		puts("ERROR unknown endpoint state for ep\n");
		return -EINVAL;
	}

	while (last_trb(ctrl, ep_ring, ep_ring->enq_seg, next)) {
		/*
		 * If we're not dealing with 0.95 hardware or isoc rings
		 * on AMD 0.96 host, clear the chain bit.
		 */
		next->link.control &= cpu_to_le32(~TRB_CHAIN);

		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		xhci_flush_cache((uintptr_t)next, sizeof(union xhci_trb));

		/* Toggle the cycle bit after the last ring segment. */
		if (last_trb_on_last_seg(ctrl, ep_ring,
					 ep_ring->enq_seg, next))
			ep_ring->cycle_state = (ep_ring->cycle_state ? 0 : 1);
		ep_ring->enq_seg = ep_ring->enq_seg->next;
		ep_ring->enqueue = ep_ring->enq_seg->trbs;
		next = ep_ring->enqueue;
	}

	return 0;
}

/**
 * Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 *
 * @param ctrl		Host controller data structure
 * @param addr		DMA address to write in the first two fields (opt.)
 * @param slot_id	Slot ID to encode in the flags field (opt.)
 * @param ep_index	Endpoint index to encode in the flags field (opt.)
 * @param cmd		Command type to enqueue
 * Return: none
 */
void xhci_queue_command(struct xhci_ctrl *ctrl, dma_addr_t addr, u32 slot_id,
			u32 ep_index, trb_type cmd)
{
	u32 fields[4];

	BUG_ON(prepare_ring(ctrl, ctrl->cmd_ring, EP_STATE_RUNNING));

	fields[0] = lower_32_bits(addr);
	fields[1] = upper_32_bits(addr);
	fields[2] = 0;
	fields[3] = TRB_TYPE(cmd) | SLOT_ID_FOR_TRB(slot_id) |
		    ctrl->cmd_ring->cycle_state;

	/*
	 * Only 'reset endpoint', 'stop endpoint' and 'set TR dequeue pointer'
	 * commands need endpoint id encoded.
	 */
	if (cmd >= TRB_RESET_EP && cmd <= TRB_SET_DEQ)
		fields[3] |= EP_ID_FOR_TRB(ep_index);

	queue_trb(ctrl, ctrl->cmd_ring, false, fields);

	/* Ring the command ring doorbell */
	xhci_writel(&ctrl->dba->doorbell[0], DB_VALUE_HOST);
}
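
/*
 * Typical caller pattern (sketch, mirroring reset_ep() below): queue the
 * command, wait for the command completion event, check it, then acknowledge:
 *
 *	xhci_queue_command(ctrl, 0, udev->slot_id, ep_index, TRB_RESET_EP);
 *	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
 *	if (!event)
 *		return;		// timed out
 *	xhci_acknowledge_event(ctrl);
 */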

/*
 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
 * packets remaining in the TD (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
 *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * For xHCI 0.96 and older, TD size field should be the remaining bytes
 * including this TRB, right shifted by 10
 *
 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
 * This is taken care of in the TRB_TD_SIZE() macro
 *
 * The last TRB in a TD must have the TD size set to zero.
 *
 * @param ctrl	host controller data structure
 * @param transferred	total size sent so far
 * @param trb_buff_len	length of the TRB Buffer
 * @param td_total_len	total length of the TD in bytes
 * @param maxp	max packet size of current pipe
 * @param more_trbs_coming	false if this is the last TRB in the TD
 * Return: remainder
 */
static u32 xhci_td_remainder(struct xhci_ctrl *ctrl, int transferred,
			     int trb_buff_len, unsigned int td_total_len,
			     int maxp, bool more_trbs_coming)
{
	u32 total_packet_count;

	/* MTK xHCI 0.96 contains some features from 1.0 */
	if (ctrl->hci_version < 0x100 && !(ctrl->quirks & XHCI_MTK_HOST))
		return ((td_total_len - transferred) >> 10);

	/* One TRB with a zero-length data packet. */
	if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
	    trb_buff_len == td_total_len)
		return 0;

	/* for MTK xHCI 0.96, the TD size includes this TRB, but not in 1.x */
	if ((ctrl->quirks & XHCI_MTK_HOST) && (ctrl->hci_version < 0x100))
		trb_buff_len = 0;

	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);

	/* Queueing functions don't count the current TRB into transferred */
	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
}
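
/*
 * Worked example (illustrative numbers): on a 1.0 host with td_total_len =
 * 1536, maxp = 512, transferred = 0 and trb_buff_len = 512,
 * total_packet_count = DIV_ROUND_UP(1536, 512) = 3 and the function returns
 * 3 - (512 / 512) = 2, i.e. two max packet sized packets remain after this TRB.
 */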

/**
 * Ring the doorbell of the End Point
 *
 * @param udev		pointer to the USB device structure
 * @param ep_index	index of the endpoint
 * @param start_cycle	cycle flag of the first TRB
 * @param start_trb	pointer to the first TRB
 * Return: none
 */
static void giveback_first_trb(struct usb_device *udev, int ep_index,
				int start_cycle,
				struct xhci_generic_trb *start_trb)
{
	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);

	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

	xhci_flush_cache((uintptr_t)start_trb, sizeof(struct xhci_generic_trb));

	/* Ringing EP doorbell here */
	xhci_writel(&ctrl->dba->doorbell[udev->slot_id],
		    DB_VALUE(ep_index, 0));

	return;
}

/**** POLLING mechanism for XHCI ****/

/**
 * Finalizes a handled event TRB by advancing our dequeue pointer and giving
 * the TRB back to the hardware for recycling. Must call this exactly once at
 * the end of each event handler, and not touch the TRB again afterwards.
 *
 * @param ctrl	Host controller data structure
 * Return: none
 */
void xhci_acknowledge_event(struct xhci_ctrl *ctrl)
{
	dma_addr_t deq;

	/* Advance our dequeue pointer to the next event */
	inc_deq(ctrl, ctrl->event_ring);

	/* Inform the hardware */
	deq = xhci_trb_virt_to_dma(ctrl->event_ring->deq_seg,
				   ctrl->event_ring->dequeue);
	xhci_writeq(&ctrl->ir_set->erst_dequeue, deq | ERST_EHB);
}

/**
 * Checks if there is a new event to handle on the event ring.
 *
 * @param ctrl	Host controller data structure
 * Return: 1 if a new event is ready to be handled, else 0
 */
static int event_ready(struct xhci_ctrl *ctrl)
{
	union xhci_trb *event;

	xhci_inval_cache((uintptr_t)ctrl->event_ring->dequeue,
			 sizeof(union xhci_trb));

	event = ctrl->event_ring->dequeue;

	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
	    ctrl->event_ring->cycle_state)
		return 0;

	return 1;
}

/**
 * Waits for a specific type of event and returns it. Discards unexpected
 * events. Caller *must* call xhci_acknowledge_event() after it is finished
 * processing the event, and must not access the returned pointer afterwards.
 *
 * @param ctrl	Host controller data structure
 * @param expected	TRB type expected from Event TRB
 * Return: pointer to event trb
 */
union xhci_trb *xhci_wait_for_event(struct xhci_ctrl *ctrl, trb_type expected)
{
	trb_type type;
	unsigned long ts = get_timer(0);

	do {
		union xhci_trb *event = ctrl->event_ring->dequeue;

		if (!event_ready(ctrl))
			continue;

		type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
		if (type == expected ||
		    (expected == TRB_NONE && type != TRB_PORT_STATUS))
			return event;

		if (type == TRB_PORT_STATUS)
			/* TODO: remove this once enumeration has been reworked */
			/*
			 * Port status change events always have a
			 * successful completion code
			 */
			BUG_ON(GET_COMP_CODE(
				le32_to_cpu(event->generic.field[2])) !=
							COMP_SUCCESS);
		else
			printf("Unexpected XHCI event TRB, skipping... "
				"(%08x %08x %08x %08x)\n",
				le32_to_cpu(event->generic.field[0]),
				le32_to_cpu(event->generic.field[1]),
				le32_to_cpu(event->generic.field[2]),
				le32_to_cpu(event->generic.field[3]));

		xhci_acknowledge_event(ctrl);
	} while (get_timer(ts) < XHCI_TIMEOUT);

	if (expected == TRB_TRANSFER)
		return NULL;

	printf("XHCI timeout on event type %d...\n", expected);

	return NULL;
}
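
/*
 * Usage sketch (the pattern used by the callers below): the returned pointer
 * is only valid until xhci_acknowledge_event() is called, so consume it first:
 *
 *	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
 *	if (!event)
 *		return;		// timed out
 *	comp = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status));
 *	xhci_acknowledge_event(ctrl);
 */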

/*
 * Send reset endpoint command for given endpoint. This recovers from a
 * halted endpoint (e.g. due to a stall error).
 */
static void reset_ep(struct usb_device *udev, int ep_index)
{
	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
	struct xhci_ring *ring = ctrl->devs[udev->slot_id]->eps[ep_index].ring;
	union xhci_trb *event;
	u64 addr;
	u32 field;

	printf("Resetting EP %d...\n", ep_index);
	xhci_queue_command(ctrl, 0, udev->slot_id, ep_index, TRB_RESET_EP);
	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
	if (!event)
		return;

	field = le32_to_cpu(event->trans_event.flags);
	BUG_ON(TRB_TO_SLOT_ID(field) != udev->slot_id);
	xhci_acknowledge_event(ctrl);

	addr = xhci_trb_virt_to_dma(ring->enq_seg,
		(void *)((uintptr_t)ring->enqueue | ring->cycle_state));
	xhci_queue_command(ctrl, addr, udev->slot_id, ep_index, TRB_SET_DEQ);
	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
	if (!event)
		return;

	BUG_ON(TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags)) != udev->slot_id ||
	       GET_COMP_CODE(le32_to_cpu(event->event_cmd.status)) != COMP_SUCCESS);
	xhci_acknowledge_event(ctrl);
}

/*
 * Stops transfer processing for an endpoint and throws away all unprocessed
 * TRBs by setting the xHC's dequeue pointer to our enqueue pointer. The next
 * xhci_bulk_tx/xhci_ctrl_tx on this endpoint will add new transfers there and
 * ring the doorbell, causing this endpoint to start working again.
 * (Careful: This will BUG() when there was no transfer in progress. Shouldn't
 * happen in practice for current uses and is too complicated to fix right now.)
 */
static void abort_td(struct usb_device *udev, int ep_index)
{
	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
	struct xhci_ring *ring = ctrl->devs[udev->slot_id]->eps[ep_index].ring;
	union xhci_trb *event;
	xhci_comp_code comp;
	trb_type type;
	u64 addr;
	u32 field;

	xhci_queue_command(ctrl, 0, udev->slot_id, ep_index, TRB_STOP_RING);

	event = xhci_wait_for_event(ctrl, TRB_NONE);
	if (!event)
		return;

	type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
	if (type == TRB_TRANSFER) {
		field = le32_to_cpu(event->trans_event.flags);
		BUG_ON(TRB_TO_SLOT_ID(field) != udev->slot_id);
		BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
		BUG_ON(GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len)) != COMP_STOP);
		xhci_acknowledge_event(ctrl);

		event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
		if (!event)
			return;
		type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));

	} else {
		printf("abort_td: Expected a TRB_TRANSFER TRB first\n");
	}

	comp = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status));
	BUG_ON(type != TRB_COMPLETION ||
	       TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags)) != udev->slot_id ||
	       (comp != COMP_SUCCESS && comp != COMP_CTX_STATE));
	xhci_acknowledge_event(ctrl);

	addr = xhci_trb_virt_to_dma(ring->enq_seg,
		(void *)((uintptr_t)ring->enqueue | ring->cycle_state));
	xhci_queue_command(ctrl, addr, udev->slot_id, ep_index, TRB_SET_DEQ);
	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
	if (!event)
		return;

	BUG_ON(TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags)) != udev->slot_id ||
	       GET_COMP_CODE(le32_to_cpu(event->event_cmd.status)) != COMP_SUCCESS);
	xhci_acknowledge_event(ctrl);
}

static void record_transfer_result(struct usb_device *udev,
				   union xhci_trb *event, int length)
{
	udev->act_len = min(length, length -
		(int)EVENT_TRB_LEN(le32_to_cpu(event->trans_event.transfer_len)));

	switch (GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))) {
	case COMP_SUCCESS:
		BUG_ON(udev->act_len != length);
		/* fallthrough */
	case COMP_SHORT_TX:
		udev->status = 0;
		break;
	case COMP_STALL:
		udev->status = USB_ST_STALLED;
		break;
	case COMP_DB_ERR:
	case COMP_TRB_ERR:
		udev->status = USB_ST_BUF_ERR;
		break;
	case COMP_BABBLE:
		udev->status = USB_ST_BABBLE_DET;
		break;
	default:
		udev->status = 0x80;	/* USB_ST_TOO_LAZY_TO_MAKE_A_NEW_MACRO */
	}
}

/**** Bulk and Control transfer methods ****/
/**
 * Queues up the BULK Request
 *
 * @param udev		pointer to the USB device structure
 * @param pipe		contains the DIR_IN or OUT, devnum
 * @param length	length of the buffer
 * @param buffer	buffer to be read/written based on the request
 * Return: returns 0 if successful else -1 on failure
 */
int xhci_bulk_tx(struct usb_device *udev, unsigned long pipe,
		 int length, void *buffer)
{
	int num_trbs = 0;
	struct xhci_generic_trb *start_trb;
	bool first_trb = false;
	int start_cycle;
	u32 field = 0;
	u32 length_field = 0;
	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
	int slot_id = udev->slot_id;
	int ep_index;
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ring;		/* EP transfer ring */
	union xhci_trb *event;

	int running_total, trb_buff_len;
	bool more_trbs_coming = true;
	int maxpacketsize;
	u64 addr;
	int ret;
	u32 trb_fields[4];
	u64 buf_64 = xhci_dma_map(ctrl, buffer, length);
	dma_addr_t last_transfer_trb_addr;
	int available_length;

	debug("dev=%p, pipe=%lx, buffer=%p, length=%d\n",
	      udev, pipe, buffer, length);

	available_length = length;
	ep_index = usb_pipe_ep_index(pipe);
	virt_dev = ctrl->devs[slot_id];

	xhci_inval_cache((uintptr_t)virt_dev->out_ctx->bytes,
			 virt_dev->out_ctx->size);

	ep_ctx = xhci_get_ep_ctx(ctrl, virt_dev->out_ctx, ep_index);

	/*
	 * If the endpoint was halted due to a prior error, resume it before
	 * the next transfer. It is the responsibility of the upper layer to
	 * have dealt with whatever caused the error.
	 */
	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == EP_STATE_HALTED)
		reset_ep(udev, ep_index);

	ring = virt_dev->eps[ep_index].ring;
	if (!ring)
		return -EINVAL;

	/*
	 * How much data is (potentially) left before the 64KB boundary?
	 * The xHCI spec (table 49 and section 6.4.1) requires that a TRB's
	 * buffer does not span a 64KB boundary. If it would, we send the
	 * request in more than one TRB by chaining them.
	 */
	running_total = TRB_MAX_BUFF_SIZE -
		(lower_32_bits(buf_64) & (TRB_MAX_BUFF_SIZE - 1));
	trb_buff_len = running_total;
	running_total &= TRB_MAX_BUFF_SIZE - 1;

	/*
	 * If there's some data on this 64KB chunk, or we have to send a
	 * zero-length transfer, we need at least one TRB
	 */
	if (running_total != 0 || length == 0)
		num_trbs++;

	/* How many more 64KB chunks to transfer, how many more TRBs? */
	while (running_total < length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
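
	/*
	 * Illustrative example (hypothetical numbers): if buf_64 ends in
	 * 0xf000 and length is 0x3000, the first TRB may carry at most 0x1000
	 * bytes up to the 64KB boundary; the remaining 0x2000 bytes fit in one
	 * more TRB, so num_trbs ends up as 2.
	 */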

	/*
	 * XXX: Calling routine prepare_ring() called in place of
	 * prepare_transfer() as there in 'Linux' since we are not
	 * maintaining multiple TDs/transfer at the same time.
	 */
	ret = prepare_ring(ctrl, ring,
			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK);
	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ring->enqueue->generic;
	start_cycle = ring->cycle_state;

	running_total = 0;
	maxpacketsize = usb_maxpacket(udev, pipe);

	/* How much data is in the first TRB? */
	/*
	 * How much data is (potentially) left before the 64KB boundary?
	 * The xHCI spec (table 49 and section 6.4.1) requires that a TRB's
	 * buffer does not span a 64KB boundary. If it would, we send the
	 * request in more than one TRB by chaining them.
	 */
	addr = buf_64;

	if (trb_buff_len > length)
		trb_buff_len = length;

	first_trb = true;

	/* flush the buffer before use */
	xhci_flush_cache((uintptr_t)buffer, length);

	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 remainder = 0;
		field = 0;
		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= TRB_CYCLE;
		} else {
			field |= ring->cycle_state;
		}

		/*
		 * Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			field |= TRB_IOC;
			more_trbs_coming = false;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_pipein(pipe))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		remainder = xhci_td_remainder(ctrl, running_total, trb_buff_len,
					      length, maxpacketsize,
					      more_trbs_coming);

		length_field = (TRB_LEN(trb_buff_len) |
				TRB_TD_SIZE(remainder) |
				TRB_INTR_TARGET(0));

		trb_fields[0] = lower_32_bits(addr);
		trb_fields[1] = upper_32_bits(addr);
		trb_fields[2] = length_field;
		trb_fields[3] = field | TRB_TYPE(TRB_NORMAL);

		last_transfer_trb_addr = queue_trb(ctrl, ring, (num_trbs > 1), trb_fields);

		--num_trbs;

		running_total += trb_buff_len;

		/* Calculate length for next transfer */
		addr += trb_buff_len;
		trb_buff_len = min((length - running_total), TRB_MAX_BUFF_SIZE);

		schedule();
	} while (running_total < length);

	giveback_first_trb(udev, ep_index, start_cycle, start_trb);

again:
	event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
	if (!event) {
		debug("XHCI bulk transfer timed out, aborting...\n");
		abort_td(udev, ep_index);
		udev->status = USB_ST_NAK_REC;	/* closest thing to a timeout */
		udev->act_len = 0;
		return -ETIMEDOUT;
	}

	if ((uintptr_t)(le64_to_cpu(event->trans_event.buffer)) !=
	    (uintptr_t)last_transfer_trb_addr) {
		available_length -=
			(int)EVENT_TRB_LEN(le32_to_cpu(event->trans_event.transfer_len));
		xhci_acknowledge_event(ctrl);
		goto again;
	}

	field = le32_to_cpu(event->trans_event.flags);
	BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
	BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);

	record_transfer_result(udev, event, available_length);
	xhci_acknowledge_event(ctrl);
	xhci_inval_cache((uintptr_t)buffer, length);
	xhci_dma_unmap(ctrl, buf_64, length);

	return (udev->status != USB_ST_NOT_PROC) ? 0 : -1;
}
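
/*
 * Usage sketch (hypothetical caller, not part of this file): the generic USB
 * layer normally reaches xhci_bulk_tx() through submit_bulk_msg(), e.g. via
 *
 *	int actual;
 *	usb_bulk_msg(udev, usb_rcvbulkpipe(udev, ep), buf, size, &actual,
 *		     timeout);
 *
 * so callers rarely invoke this function directly.
 */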

/**
 * Queues up the Control Transfer Request
 *
 * @param udev		pointer to the USB device structure
 * @param pipe		contains the DIR_IN or OUT, devnum
 * @param req		request type
 * @param length	length of the buffer
 * @param buffer	buffer to be read/written based on the request
 * Return: returns 0 if successful else error code on failure
 */
int xhci_ctrl_tx(struct usb_device *udev, unsigned long pipe,
		 struct devrequest *req, int length,
		 void *buffer)
{
	int ret;
	int start_cycle;
	int num_trbs;
	u32 field;
	u32 length_field;
	u64 buf_64 = 0;
	struct xhci_generic_trb *start_trb;
	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
	int slot_id = udev->slot_id;
	int ep_index;
	u32 trb_fields[4];
	struct xhci_virt_device *virt_dev = ctrl->devs[slot_id];
	struct xhci_ring *ep_ring;
	union xhci_trb *event;
	u32 remainder;

	debug("req=%u (%#x), type=%u (%#x), value=%u (%#x), index=%u\n",
	      req->request, req->request,
	      req->requesttype, req->requesttype,
	      le16_to_cpu(req->value), le16_to_cpu(req->value),
	      le16_to_cpu(req->index));

	ep_index = usb_pipe_ep_index(pipe);

	ep_ring = virt_dev->eps[ep_index].ring;
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Check to see if the max packet size for the default control
	 * endpoint changed during FS device enumeration
	 */
	if (udev->speed == USB_SPEED_FULL) {
		ret = xhci_check_maxpacket(udev);
		if (ret < 0)
			return ret;
	}

	xhci_inval_cache((uintptr_t)virt_dev->out_ctx->bytes,
			 virt_dev->out_ctx->size);

	struct xhci_ep_ctx *ep_ctx =
		xhci_get_ep_ctx(ctrl, virt_dev->out_ctx, ep_index);

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */

	if (length > 0)
		num_trbs++;
	/*
	 * XXX: Calling routine prepare_ring() called in place of
	 * prepare_transfer() as there in 'Linux' since we are not
	 * maintaining multiple TDs/transfer at the same time.
	 */
	ret = prepare_ring(ctrl, ep_ring,
			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK);

	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	debug("start_trb %p, start_cycle %d\n", start_trb, start_cycle);

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
	if (ctrl->hci_version >= 0x100 || ctrl->quirks & XHCI_MTK_HOST) {
		if (length > 0) {
			if (req->requesttype & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	debug("req->requesttype = %d, req->request = %d, req->value = %d, req->index = %d, req->length = %d\n",
	      req->requesttype, req->request, le16_to_cpu(req->value),
	      le16_to_cpu(req->index), le16_to_cpu(req->length));

	trb_fields[0] = req->requesttype | req->request << 8 |
			le16_to_cpu(req->value) << 16;
	trb_fields[1] = le16_to_cpu(req->index) |
			le16_to_cpu(req->length) << 16;
	/* TRB_LEN | (TRB_INTR_TARGET) */
	trb_fields[2] = (TRB_LEN(8) | TRB_INTR_TARGET(0));
	/* Immediate data in pointer */
	trb_fields[3] = field;
	queue_trb(ctrl, ep_ring, true, trb_fields);

	/* Re-initializing field to zero */
	field = 0;
	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_pipein(pipe))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	remainder = xhci_td_remainder(ctrl, 0, length, length,
				      usb_maxpacket(udev, pipe), true);
	length_field = TRB_LEN(length) | TRB_TD_SIZE(remainder) |
		       TRB_INTR_TARGET(0);
	debug("length_field = %d, length = %d, "
	      "xhci_td_remainder(length) = %d, TRB_INTR_TARGET(0) = %d\n",
	      length_field, TRB_LEN(length),
	      TRB_TD_SIZE(remainder), 0);

	if (length > 0) {
		if (req->requesttype & USB_DIR_IN)
			field |= TRB_DIR_IN;
		buf_64 = xhci_dma_map(ctrl, buffer, length);

		trb_fields[0] = lower_32_bits(buf_64);
		trb_fields[1] = upper_32_bits(buf_64);
		trb_fields[2] = length_field;
		trb_fields[3] = field | ep_ring->cycle_state;

		xhci_flush_cache((uintptr_t)buffer, length);
		queue_trb(ctrl, ep_ring, true, trb_fields);
	}

	/*
	 * Queue status TRB -
	 * see Table 7 and sections 4.11.2.2 and 6.4.1.2.3
	 */

	/* If the device sent data, the status stage is an OUT transfer */
	field = 0;
	if (length > 0 && req->requesttype & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;

	trb_fields[0] = 0;
	trb_fields[1] = 0;
	trb_fields[2] = TRB_INTR_TARGET(0);
	/* Event on completion */
	trb_fields[3] = field | TRB_IOC |
			TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state;

	queue_trb(ctrl, ep_ring, false, trb_fields);

	giveback_first_trb(udev, ep_index, start_cycle, start_trb);

	event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
	if (!event)
		goto abort;
	field = le32_to_cpu(event->trans_event.flags);

	BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
	BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);

	record_transfer_result(udev, event, length);
	xhci_acknowledge_event(ctrl);
	if (udev->status == USB_ST_STALLED) {
		reset_ep(udev, ep_index);
		return -EPIPE;
	}

	/* Invalidate buffer to make it available to usb-core */
	if (length > 0) {
		xhci_inval_cache((uintptr_t)buffer, length);
		xhci_dma_unmap(ctrl, buf_64, length);
	}

	if (GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))
			== COMP_SHORT_TX) {
		/* Short data stage, clear up additional status stage event */
		event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
		if (!event)
			goto abort;
		BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
		BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
		xhci_acknowledge_event(ctrl);
	}

	return (udev->status != USB_ST_NOT_PROC) ? 0 : -1;

abort:
	debug("XHCI control transfer timed out, aborting...\n");
	abort_td(udev, ep_index);
	udev->status = USB_ST_NAK_REC;
	udev->act_len = 0;
	return -ETIMEDOUT;
}
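
/*
 * Usage sketch (hypothetical caller, not part of this file): control transfers
 * normally reach xhci_ctrl_tx() through submit_control_msg(), e.g. via
 *
 *	usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), USB_REQ_GET_DESCRIPTOR,
 *			USB_DIR_IN, USB_DT_DEVICE << 8, 0, buf, 18,
 *			USB_CNTL_TIMEOUT);
 *
 * rather than being called directly.
 */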