// SPDX-License-Identifier: GPL-2.0+
/**
 * ufs.c - Universal Flash Storage (UFS) driver
 *
 * Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported
 * to u-boot.
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 */

#include <charset.h>
#include <common.h>
#include <dm.h>
#include <log.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/lists.h>
#include <dm/device-internal.h>
#include <malloc.h>
#include <hexdump.h>
#include <scsi.h>

#include <linux/dma-mapping.h>

#include "ufs.h"

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* maximum timeout in ms for a general UIC command */
#define UFS_UIC_CMD_TIMEOUT 1000
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT 30 /* msecs */

/* Only use one Task Tag for all requests */
#define TASK_TAG 0

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

#define MAX_PRDT_ENTRY 262144

/* maximum bytes per request */
#define UFS_MAX_BYTES (128 * 256 * 1024)

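/*
 * Note: with MAX_PRDT_ENTRY (256 KiB) covered per PRDT entry, UFS_MAX_BYTES
 * (128 * 256 KiB = 32 MiB) corresponds to at most 128 scatter-gather entries
 * for a single request, all issued through the one TASK_TAG doorbell slot.
 */
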
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba);
static inline void ufshcd_hba_stop(struct ufs_hba *hba);
static int ufshcd_hba_enable(struct ufs_hba *hba);

/*
 * ufshcd_wait_for_register - wait for a register to reach the given value
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				    u32 val, unsigned long timeout_ms)
{
	int err = 0;
	unsigned long start = get_timer(0);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (get_timer(start) > timeout_ms) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}

/**
 * ufshcd_init_pwr_info - set the POR (power-on reset) values
 * in hba power info
 */
static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
	hba->pwr_info.gear_rx = UFS_PWM_G1;
	hba->pwr_info.gear_tx = UFS_PWM_G1;
	hba->pwr_info.lane_rx = 1;
	hba->pwr_info.lane_tx = 1;
	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
	hba->pwr_info.hs_rate = 0;
}

/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		names[hba->pwr_info.pwr_rx],
		names[hba->pwr_info.pwr_tx],
		hba->pwr_info.hs_rate);
}

/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 * to accept UIC commands
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 * the host controller
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
		DEVICE_PRESENT) ? true : false;
}

/**
 * ufshcd_send_uic_cmd - UFS Interconnect layer command API
 */
static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	unsigned long start = 0;
	u32 intr_status;
	u32 enabled_intr_status;

	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	debug("sending uic command:%d\n", uic_cmd->command);

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);

	start = get_timer(0);
	do {
		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
		enabled_intr_status = intr_status & hba->intr_mask;
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
			dev_err(hba->dev,
				"Timed out waiting for UIC response\n");

			return -ETIMEDOUT;
		}

		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
			dev_err(hba->dev, "Error in status:%08x\n",
				enabled_intr_status);

			return -1;
		}
	} while (!(enabled_intr_status & UFSHCD_UIC_MASK));

	uic_cmd->argument2 = ufshcd_get_uic_cmd_result(hba);
	uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba);

	debug("Sent successfully\n");

	return 0;
}

/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set,
			u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
			UFS_UIC_COMMAND_RETRIES - retries);

	return ret;
}

/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
				get, UIC_GET_ATTR_ID(attr_sel), ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
			get, UIC_GET_ATTR_ID(attr_sel),
			UFS_UIC_COMMAND_RETRIES - retries);

	if (mib_val && !ret)
		*mib_val = uic_cmd.argument3;

	return ret;
}

static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
	u32 tx_lanes, i, err = 0;

	if (!peer)
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			       &tx_lanes);
	else
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				    &tx_lanes);
	for (i = 0; i < tx_lanes; i++) {
		if (!peer)
			err = ufshcd_dme_set(hba,
					     UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					     UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					     0);
		else
			err = ufshcd_dme_peer_set(hba,
					UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		if (err) {
			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
				__func__, peer, i, err);
			break;
		}
	}

	return err;
}

static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}

/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_dbg(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 * When the run-stop registers are set to 1, the host controller
 * can start processing requests.
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

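/*
 * In ufshcd_enable_intr() below, UFSHCI 1.0 hosts get special treatment:
 * only the bits in INTERRUPT_MASK_RW_VER_10 are kept from the current
 * register value and the requested interrupts are OR-ed on top, while
 * later controller versions allow a plain read-modify-write.
 */
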
/**
 * ufshcd_enable_intr - enable interrupts
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
	u32 rw;

	if (hba->version == UFSHCI_VERSION_10) {
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);

	hba->intr_mask = set;
}

/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 *
 * To bring UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base address
 * 4. Configure run-stop-registers
 */
static int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Disable interrupt aggregation */
	ufshcd_disable_intr_aggr(hba);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utrdl),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utrdl),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utmrdl),
		      REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl),
		      REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * The UCRDY, UTMRLRDY and UTRLRDY bits must all be 1
	 */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests\n");
		err = -EIO;
		goto out;
	}

out:
	return err;
}

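/*
 * ufshcd_link_startup() below deliberately runs the whole startup sequence
 * twice (via link_startup_again): some UFS devices only settle into the
 * expected link state after a second DME_LINKSTARTUP, and repeating it is
 * harmless for the rest.
 */
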
/**
 * ufshcd_link_startup - Initialize unipro link startup
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	bool link_startup_again = true;

link_startup:
	do {
		ufshcd_ops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && ufshcd_hba_enable(hba))
			goto out;
	} while (ret && retries--);

	if (ret)
		/* failed to get the link up... give up */
		goto out;

	if (link_startup_again) {
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_ops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret)
		dev_err(hba->dev, "link startup failed %d\n", ret);

	return ret;
}

/**
 * ufshcd_hba_stop - Send controller to reset state
 */
static inline void ufshcd_hba_stop(struct ufs_hba *hba)
{
	int err;

	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
				       CONTROLLER_ENABLE, CONTROLLER_DISABLE,
				       10);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}

/**
 * ufshcd_is_hba_active - Get controller state
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
		? false : true;
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_hba_enable - initialize the controller
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	if (!ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

	ufshcd_ops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller, the HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization, it sets HCE
	 * back to 1, and the same bit is read back to check whether the
	 * controller has finished. Without this delay, the HCE = 1 value
	 * written by the previous instruction might be read back instead.
	 * This delay can be changed based on the controller.
	 */
	mdelay(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev, "Controller enable failed\n");
			return -EIO;
		}
		mdelay(5);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_ops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}

/**
 * ufshcd_host_memory_configure - configure local reference block with
 * memory offsets
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	u16 response_offset;
	u16 prdt_offset;

	utrdlp = hba->utrdl;
	cmd_desc_dma_addr = (dma_addr_t)hba->ucdl;

	utrdlp->command_desc_base_addr_lo =
		cpu_to_le32(lower_32_bits(cmd_desc_dma_addr));
	utrdlp->command_desc_base_addr_hi =
		cpu_to_le32(upper_32_bits(cmd_desc_dma_addr));

	response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);

	utrdlp->response_upiu_offset = cpu_to_le16(response_offset >> 2);
	utrdlp->prd_table_offset = cpu_to_le16(prdt_offset >> 2);
	utrdlp->response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);

	hba->ucd_req_ptr = (struct utp_upiu_req *)hba->ucdl;
	hba->ucd_rsp_ptr =
		(struct utp_upiu_rsp *)&hba->ucdl->response_upiu;
	hba->ucd_prdt_ptr =
		(struct ufshcd_sg_entry *)&hba->ucdl->prd_table;
}

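/*
 * The response-UPIU offset/length and PRDT offset fields in the transfer
 * request descriptor are expressed in 32-bit (dword) units, which is why
 * the byte offsets above are shifted right by 2 before being written.
 */
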
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	/* Allocate one Transfer Request Descriptor
	 * Should be aligned to 1k boundary.
	 */
	hba->utrdl = memalign(1024, sizeof(struct utp_transfer_req_desc));
	if (!hba->utrdl) {
		dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n");
		return -ENOMEM;
	}

	/* Allocate one Command Descriptor
	 * Should be aligned to 1k boundary.
	 */
	hba->ucdl = memalign(1024, sizeof(struct utp_transfer_cmd_desc));
	if (!hba->ucdl) {
		dev_err(hba->dev, "Command descriptor memory allocation failed\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
		break;
	}

	return intr_mask;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UFS_VERSION);
}

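/*
 * UPMCRS (UIC Power Mode Change Request Status) lives in bits 10:8 of the
 * host controller status register, hence the shift by 8 and the 3-bit mask
 * in the helper below.
 */
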
/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}

/**
 * ufshcd_prepare_req_desc_hdr() - Fill the request's header
 * descriptor according to the request
 */
static void ufshcd_prepare_req_desc_hdr(struct utp_transfer_req_desc *req_desc,
					u32 *upiu_flags,
					enum dma_data_direction cmd_dir)
{
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (0x1 << UPIU_COMMAND_TYPE_OFFSET);

	/* Enable Interrupt for command */
	dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);
	/* dword_1 is reserved, hence it is set to 0 */
	req_desc->header.dword_1 = 0;
	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
	/* dword_3 is reserved, hence it is set to 0 */
	req_desc->header.dword_3 = 0;

	req_desc->prd_table_length = 0;
}

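/*
 * In dword_0 above, the low bits carry the data direction while the
 * command-type field (at UPIU_COMMAND_TYPE_OFFSET) is hardcoded to 1,
 * the "UFS Storage" command type defined by UFSHCI 2.x, the only type
 * this driver issues.
 */
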
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
					      u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);

	/* Query request header */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ,
				  upiu_flags, 0, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
		UPIU_HEADER_DWORD(0, query->request.query_func,
				  0, 0);

	/* Data segment length is only needed for WRITE_DESC */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		ucd_req_ptr->header.dword_2 =
			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
	else
		ucd_req_ptr->header.dword_2 = 0;

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(ucd_req_ptr + 1, query->descriptor, len);

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, 0x1f);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

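/*
 * Note that the NOP OUT above carries task tag 0x1f in its UPIU header,
 * whereas every other request in this driver uses TASK_TAG (0); the
 * doorbell slot rung in ufshcd_send_command() is still TASK_TAG in both
 * cases.
 */
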
/**
 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
 * for Device Management Purposes
 */
static int ufshcd_comp_devman_upiu(struct ufs_hba *hba,
				   enum dev_cmd_type cmd_type)
{
	u32 upiu_flags;
	int ret = 0;
	struct utp_transfer_req_desc *req_desc = hba->utrdl;

	hba->dev_cmd.type = cmd_type;

	ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, DMA_NONE);
	switch (cmd_type) {
	case DEV_CMD_TYPE_QUERY:
		ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags);
		break;
	case DEV_CMD_TYPE_NOP:
		ufshcd_prepare_utp_nop_upiu(hba);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	unsigned long start;
	u32 intr_status;
	u32 enabled_intr_status;

	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);

	start = get_timer(0);
	do {
		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
		enabled_intr_status = intr_status & hba->intr_mask;
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

		if (get_timer(start) > QUERY_REQ_TIMEOUT) {
			dev_err(hba->dev,
				"Timed out waiting for UTP response\n");

			return -ETIMEDOUT;
		}

		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
			dev_err(hba->dev, "Error in status:%08x\n",
				enabled_intr_status);

			return -1;
		}
	} while (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL));

	return 0;
}

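/*
 * The driver runs without interrupts: ufshcd_send_command() rings the
 * doorbell for the (single) slot and then busy-polls the interrupt status
 * register, acknowledging whatever bits it sees, until the transfer
 * completes, errors out, or times out.
 */
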
/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 */
static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 */
static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba)
{
	return le32_to_cpu(hba->utrdl->header.dword_2) & MASK_OCS;
}

static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

static int ufshcd_check_query_response(struct ufs_hba *hba)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr) >>
			      UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}

/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 */
static int ufshcd_copy_query_response(struct ufs_hba *hba)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &hba->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (hba->dev_cmd.query.descriptor &&
	    hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)hba->ucd_rsp_ptr +
			    GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(hba->ucd_rsp_ptr->header.dword_2) &
			   MASK_QUERY_DATA_SEG_LEN;
		buf_len =
			be16_to_cpu(hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: Response size is bigger than buffer",
				 __func__);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type,
			       int timeout)
{
	int err;
	int resp;

	err = ufshcd_comp_devman_upiu(hba, cmd_type);
	if (err)
		return err;

	err = ufshcd_send_command(hba, TASK_TAG);
	if (err)
		return err;

	err = ufshcd_get_tr_ocs(hba);
	if (err) {
		dev_err(hba->dev, "Error in OCS:%d\n", err);
		return -EINVAL;
	}

	resp = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba);
		if (!err)
			err = ufshcd_copy_query_response(hba);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
			__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
			__func__, resp);
	}

	return err;
}

/**
 * ufshcd_init_query() - init the query response and request parameters
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
				     struct ufs_query_req **request,
				     struct ufs_query_res **response,
				     enum query_opcode opcode,
				     u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}

/**
 * ufshcd_query_flag() - API function for sending flag query requests
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
		      enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, index = 0, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			  selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
				__func__);
			err = -EINVAL;
			goto out;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
			     MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out:
	return err;
}

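/*
 * Example (a sketch mirroring the actual call in
 * ufshcd_complete_dev_init() further below): reading the fDeviceInit flag.
 *
 *	bool flag_res;
 *	int err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				    QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */
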
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
				   enum query_opcode opcode,
				   enum flag_idn idn, bool *flag_res)
{
	int ret;
	int retries;

	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
		ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
		if (ret)
			dev_dbg(hba->dev,
				"%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);
	return ret;
}

static int __ufshcd_query_descriptor(struct ufs_hba *hba,
				     enum query_opcode opcode,
				     enum desc_idn idn, u8 index, u8 selector,
				     u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
			__func__, *buf_len);
		err = -EINVAL;
		goto out;
	}

	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			  selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query descriptor opcode but got = 0x%.2x\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
			__func__, opcode, idn, index, err);
		goto out;
	}

	hba->dev_cmd.query.descriptor = NULL;
	*buf_len = be16_to_cpu(response->upiu_res.length);

out:
	return err;
}

/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
 */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode,
				  enum desc_idn idn, u8 index, u8 selector,
				  u8 *desc_buf, int *buf_len)
{
	int err;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
						selector, desc_buf, buf_len);
		if (!err || err == -EINVAL)
			break;
	}

	return err;
}

/**
 * ufshcd_read_desc_length - read the specified descriptor length from header
 */
static int ufshcd_read_desc_length(struct ufs_hba *hba, enum desc_idn desc_id,
				   int desc_index, int *desc_length)
{
	int ret;
	u8 header[QUERY_DESC_HDR_SIZE];
	int header_len = QUERY_DESC_HDR_SIZE;

	if (desc_id >= QUERY_DESC_IDN_MAX)
		return -EINVAL;

	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0, header,
					    &header_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
			__func__, desc_id);
		return ret;
	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
			 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
			 desc_id);
		ret = -EINVAL;
	}

	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];

	return ret;
}

static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
				      &hba->desc_size.dev_desc);
	if (err)
		hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
				      &hba->desc_size.pwr_desc);
	if (err)
		hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
				      &hba->desc_size.interc_desc);
	if (err)
		hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
				      &hba->desc_size.conf_desc);
	if (err)
		hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
				      &hba->desc_size.unit_desc);
	if (err)
		hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
				      &hba->desc_size.geom_desc);
	if (err)
		hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
				      &hba->desc_size.hlth_desc);
	if (err)
		hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}

/**
 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
 */
int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
				 int *desc_len)
{
	switch (desc_id) {
	case QUERY_DESC_IDN_DEVICE:
		*desc_len = hba->desc_size.dev_desc;
		break;
	case QUERY_DESC_IDN_POWER:
		*desc_len = hba->desc_size.pwr_desc;
		break;
	case QUERY_DESC_IDN_GEOMETRY:
		*desc_len = hba->desc_size.geom_desc;
		break;
	case QUERY_DESC_IDN_CONFIGURATION:
		*desc_len = hba->desc_size.conf_desc;
		break;
	case QUERY_DESC_IDN_UNIT:
		*desc_len = hba->desc_size.unit_desc;
		break;
	case QUERY_DESC_IDN_INTERCONNECT:
		*desc_len = hba->desc_size.interc_desc;
		break;
	case QUERY_DESC_IDN_STRING:
		*desc_len = QUERY_DESC_MAX_SIZE;
		break;
	case QUERY_DESC_IDN_HEALTH:
		*desc_len = hba->desc_size.hlth_desc;
		break;
	case QUERY_DESC_IDN_RFU_0:
	case QUERY_DESC_IDN_RFU_1:
		*desc_len = 0;
		break;
	default:
		*desc_len = 0;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);

/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 */
int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id,
			   int desc_index, u8 param_offset, u8 *param_read_buf,
			   u8 param_size)
{
	int ret;
	u8 *desc_buf;
	int buff_len;
	bool is_kmalloc = true;

	/* Safety check */
	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
		return -EINVAL;

	/* Get the max length of descriptor from structure filled up at probe
	 * time.
	 */
	ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);

	/* Sanity checks */
	if (ret || !buff_len) {
		dev_err(hba->dev, "%s: Failed to get full descriptor length",
			__func__);
		return ret;
	}

	/* Check whether we need temp memory */
	if (param_offset != 0 || param_size < buff_len) {
		desc_buf = kmalloc(buff_len, GFP_KERNEL);
		if (!desc_buf)
			return -ENOMEM;
	} else {
		desc_buf = param_read_buf;
		is_kmalloc = false;
	}

	/* Request for full descriptor */
	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0, desc_buf,
					    &buff_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
			__func__, desc_id, desc_index, param_offset, ret);
		goto out;
	}

	/* Sanity check */
	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
		ret = -EINVAL;
		goto out;
	}

	/* Make sure we don't copy more data than is available */
	if (is_kmalloc && param_size > buff_len)
		param_size = buff_len;

	if (is_kmalloc)
		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
out:
	if (is_kmalloc)
		kfree(desc_buf);
	return ret;
}

/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(uint8_t *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}

/**
 * ufshcd_uic_pwr_ctrl - executes a UIC command (which affects the link
 * power state) and waits for it to take effect.
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	unsigned long start = 0;
	u8 status;
	int ret;

	ret = ufshcd_send_uic_cmd(hba, cmd);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);

		return ret;
	}

	start = get_timer(0);
	do {
		status = ufshcd_get_upmcrs(hba);
		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
			dev_err(hba->dev,
				"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
				cmd->command, status);
			ret = (status != PWR_OK) ? status : -1;
			break;
		}
	} while (status != PWR_LOCAL);

	return ret;
}

/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 * using DME_SET primitives.
 */
static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);

	return ret;
}

static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
				      struct scsi_cmd *pccb, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	unsigned int cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags,
				  pccb->lun, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
		UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(pccb->datalen);

	cdb_len = min_t(unsigned short, pccb->cmdlen, UFS_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
				     unsigned char *buf, ulong len)
{
	entry->size = cpu_to_le32(len) | GENMASK(1, 0);
	entry->base_addr = cpu_to_le32(lower_32_bits((unsigned long)buf));
	entry->upper_addr = cpu_to_le32(upper_32_bits((unsigned long)buf));
}

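/*
 * Note on the size field above: callers pass len as the byte count minus
 * one, and the UFSHCI PRDT data byte count field additionally requires its
 * two least significant bits to be 11b (transfers are dword-granular),
 * which is what OR-ing GENMASK(1, 0) guarantees.
 */
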
static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
{
	struct utp_transfer_req_desc *req_desc = hba->utrdl;
	struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
	ulong datalen = pccb->datalen;
	int table_length;
	u8 *buf;
	int i;

	if (!datalen) {
		req_desc->prd_table_length = 0;
		return;
	}

	table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
	buf = pccb->pdata;
	i = table_length;
	while (--i) {
		prepare_prdt_desc(&prd_table[table_length - i - 1], buf,
				  MAX_PRDT_ENTRY - 1);
		buf += MAX_PRDT_ENTRY;
		datalen -= MAX_PRDT_ENTRY;
	}

	prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1);

	req_desc->prd_table_length = table_length;
}

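/*
 * Worked example: a 600 KiB (614400-byte) transfer gives
 * DIV_ROUND_UP(614400, 262144) = 3 PRDT entries: two full 256 KiB entries
 * followed by a final entry covering the remaining 88 KiB.
 */
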
static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)
{
	struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent);
	struct utp_transfer_req_desc *req_desc = hba->utrdl;
	u32 upiu_flags;
	int ocs, ret, result = 0;
	u8 scsi_status;

	ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, pccb->dma_dir);
	ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags);
	prepare_prdt_table(hba, pccb);

	/* don't trust the OCS field if the command never completed */
	ret = ufshcd_send_command(hba, TASK_TAG);
	if (ret)
		return ret;

	ocs = ufshcd_get_tr_ocs(hba);
	switch (ocs) {
	case OCS_SUCCESS:
		result = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
		switch (result) {
		case UPIU_TRANSACTION_RESPONSE:
			result = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr);

			scsi_status = result & MASK_SCSI_STATUS;
			if (scsi_status)
				return -EINVAL;

			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			return -EINVAL;
		default:
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			return -EINVAL;
		}
		break;
	default:
		dev_err(hba->dev, "OCS error from controller = %x\n", ocs);
		return -EINVAL;
	}

	return 0;
}

static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id,
				   int desc_index, u8 *buf, u32 size)
{
	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
}

static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
{
	return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
}

/**
 * ufshcd_read_string_desc - read string descriptor
 */
int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
			    u8 *buf, u32 size, bool ascii)
{
	int err = 0;

	err = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, desc_index, buf,
			       size);

	if (err) {
		dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
			__func__, QUERY_REQ_RETRIES, err);
		goto out;
	}

	if (ascii) {
		int desc_len;
		int ascii_len;
		int i;
		u8 *buff_ascii;

		desc_len = buf[0];
		/* remove header and divide by 2 to go from UTF-16 to UTF-8 */
		ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
			dev_err(hba->dev, "%s: buffer allocated size is too small\n",
				__func__);
			err = -ENOMEM;
			goto out;
		}

		buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
		if (!buff_ascii) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains the string in UTF-16 format;
		 * convert it to UTF-8 so it can be displayed
		 */
		utf16_to_utf8(buff_ascii,
			      (uint16_t *)&buf[QUERY_DESC_HDR_SIZE], ascii_len);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ascii_len; i++)
			ufshcd_remove_non_printable(&buff_ascii[i]);

		memset(buf + QUERY_DESC_HDR_SIZE, 0,
		       size - QUERY_DESC_HDR_SIZE);
		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
		kfree(buff_ascii);
	}
out:
	return err;
}

static int ufs_get_device_desc(struct ufs_hba *hba,
			       struct ufs_dev_desc *dev_desc)
{
	int err;
	size_t buff_len;
	u8 model_index;
	u8 *desc_buf;

	buff_len = max_t(size_t, hba->desc_size.dev_desc,
			 QUERY_DESC_MAX_SIZE + 1);
	desc_buf = kmalloc(buff_len, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				    desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];

	/* Zero-pad entire buffer for string termination. */
	memset(desc_buf, 0, buff_len);

	err = ufshcd_read_string_desc(hba, model_index, desc_buf,
				      QUERY_DESC_MAX_SIZE, true/*ASCII*/);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
	strlcpy(dev_desc->model, (char *)(desc_buf + QUERY_DESC_HDR_SIZE),
		min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
		      MAX_MODEL_LEN));

	/* Null terminate the model string */
	dev_desc->model[MAX_MODEL_LEN] = '\0';

out:
	kfree(desc_buf);
	return err;
}

/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	pwr_info->pwr_tx = FAST_MODE;
	pwr_info->pwr_rx = FAST_MODE;
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
		       &pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
		       &pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
			__func__, pwr_info->lane_rx, pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
			       &pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			    &pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				    &pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}

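/*
 * The TX gear above is deliberately derived from the *peer's* RX
 * attributes (PA_MAXRXHSGEAR/PA_MAXRXPWMGEAR read via dme_peer_get): what
 * the host transmits is received by the device, so the device's RX
 * capability bounds the host's TX settings.
 */
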
static int ufshcd_change_power_mode(struct ufs_hba *hba,
				    struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
		       pwr_mode->lane_rx);
	if (pwr_mode->pwr_rx == FASTAUTO_MODE || pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
		       pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE || pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       pwr_mode->hs_rate);

	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 |
					 pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);

		return ret;
	}

	/* Copy new Power Mode to power info */
	memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr));

	return ret;
}

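/*
 * The PA_PWRMODE value written via ufshcd_uic_change_pwr_mode() packs the
 * RX power mode into the upper nibble and the TX power mode into the lower
 * nibble, hence the (pwr_rx << 4 | pwr_tx) above.
 */
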
/**
 * ufshcd_verify_dev_init() - Verify device initialization
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int retries;
	int err;

	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  NOP_OUT_TIMEOUT);
		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);

	return err;
}

/**
 * ufshcd_complete_dev_init() - checks device readiness
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int i;
	int err;
	bool flag_res = true;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
				      QUERY_FLAG_IDN_FDEVICEINIT, NULL);
	if (err) {
		dev_err(hba->dev,
			"%s: setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* poll for max. 1000 iterations for fDeviceInit flag to clear */
	for (i = 0; i < 1000 && !err && flag_res; i++)
		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
					      QUERY_FLAG_IDN_FDEVICEINIT,
					      &flag_res);

	if (err)
		dev_err(hba->dev,
			"%s: reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	else if (flag_res)
		dev_err(hba->dev,
			"%s: fDeviceInit was not cleared by the device\n",
			__func__);

out:
	return err;
}

static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
{
	hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
	hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
	hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
	hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
	hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
	hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
	hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}

int ufs_start(struct ufs_hba *hba)
{
	struct ufs_dev_desc card = {0};
	int ret;

	ret = ufshcd_link_startup(hba);
	if (ret)
		return ret;

	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		return ret;

	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		return ret;

	/* Init check for device descriptor sizes */
	ufshcd_init_desc_sizes(hba);

	ret = ufs_get_device_desc(hba, &card);
	if (ret) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
			__func__, ret);

		return ret;
	}

	if (ufshcd_get_max_pwr_mode(hba)) {
		dev_err(hba->dev,
			"%s: Failed getting max supported power mode\n",
			__func__);
	} else {
		ret = ufshcd_change_power_mode(hba, &hba->max_pwr_info.info);
		if (ret) {
			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
				__func__, ret);

			return ret;
		}

		printf("Device at %s up at:", hba->dev->name);
		ufshcd_print_pwr_info(hba);
	}

	return 0;
}

int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops)
{
	struct ufs_hba *hba = dev_get_uclass_priv(ufs_dev);
	struct scsi_platdata *scsi_plat;
	struct udevice *scsi_dev;
	int err;

	device_find_first_child(ufs_dev, &scsi_dev);
	if (!scsi_dev)
		return -ENODEV;

	scsi_plat = dev_get_uclass_platdata(scsi_dev);
	scsi_plat->max_id = UFSHCD_MAX_ID;
	scsi_plat->max_lun = UFS_MAX_LUNS;
	scsi_plat->max_bytes_per_req = UFS_MAX_BYTES;

	hba->dev = ufs_dev;
	hba->ops = hba_ops;
	hba->mmio_base = (void *)dev_read_addr(ufs_dev);

	/* Set descriptor lengths to specification defaults */
	ufshcd_def_desc_sizes(hba);

	ufshcd_ops_init(hba);

	/* Read capabilities registers */
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* Get UFS version supported by the controller */
	hba->version = ufshcd_get_ufs_version(hba);
	if (hba->version != UFSHCI_VERSION_10 &&
	    hba->version != UFSHCI_VERSION_11 &&
	    hba->version != UFSHCI_VERSION_20 &&
	    hba->version != UFSHCI_VERSION_21)
		dev_err(hba->dev, "invalid UFS version 0x%x\n",
			hba->version);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		return err;
	}

	/* Configure Local data structures */
	ufshcd_host_memory_configure(hba);

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering UFS controller interrupt handler, clear any pending UFS
	 * interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);

	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		return err;
	}

	err = ufs_start(hba);
	if (err)
		return err;

	return 0;
}

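/*
 * ufshcd_probe() is intended to be called from a platform glue driver's
 * probe routine with that platform's ufs_hba_ops. A minimal sketch (the
 * "foo" names are purely illustrative, not an existing driver):
 *
 *	static struct ufs_hba_ops foo_hba_ops = {
 *		.init = foo_init,
 *	};
 *
 *	static int foo_ufs_probe(struct udevice *dev)
 *	{
 *		return ufshcd_probe(dev, &foo_hba_ops);
 *	}
 */
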
int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp)
{
	int ret = device_bind_driver(ufs_dev, "ufs_scsi", "ufs_scsi",
				     scsi_devp);

	return ret;
}

static struct scsi_ops ufs_ops = {
	.exec = ufs_scsi_exec,
};

int ufs_probe_dev(int index)
{
	struct udevice *dev;

	return uclass_get_device(UCLASS_UFS, index, &dev);
}

int ufs_probe(void)
{
	struct udevice *dev;
	int ret, i;

	for (i = 0;; i++) {
		ret = uclass_get_device(UCLASS_UFS, i, &dev);
		if (ret == -ENODEV)
			break;
	}

	return 0;
}

U_BOOT_DRIVER(ufs_scsi) = {
	.id = UCLASS_SCSI,
	.name = "ufs_scsi",
	.ops = &ufs_ops,
};