// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
 * Copyright (C) 2024 Texas Instruments Incorporated - https://www.ti.com
 *
 * PCIe controller driver for TI's K3 SoCs with Cadence PCIe controller
 *
 * Ported from the Linux driver - drivers/pci/controller/cadence/pci-j721e.c
 *
 * Author: Siddharth Vadapalli <s-vadapalli@ti.com>
 */

#include <asm/gpio.h>
#include <clk.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <generic-phy.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/log2.h>
#include <pci.h>
#include <power-domain.h>
#include <regmap.h>
#include <syscon.h>

#define CDNS_PCIE_LM_BASE		0x00100000
#define CDNS_PCIE_LM_ID			(CDNS_PCIE_LM_BASE + 0x0044)
#define CDNS_PCIE_LTSSM_CTRL_CAP	(CDNS_PCIE_LM_BASE + 0x0054)
#define CDNS_PCIE_LM_RC_BAR_CFG		(CDNS_PCIE_LM_BASE + 0x0300)

#define CDNS_PCIE_LM_ID_VENDOR_MASK	GENMASK(15, 0)
#define CDNS_PCIE_LM_ID_VENDOR_SHIFT	0
#define CDNS_PCIE_LM_ID_VENDOR(vid) \
	(((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
#define CDNS_PCIE_LM_ID_SUBSYS_MASK	GENMASK(31, 16)
#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT	16
#define CDNS_PCIE_LM_ID_SUBSYS(sub) \
	(((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)

#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK	GENMASK(8, 6)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
	(((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)

#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK	GENMASK(16, 14)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
	(((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)

#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE	BIT(17)
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS	BIT(18)
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE		BIT(19)
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS		BIT(20)

#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED		0x0
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS		0x4
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS	0x5
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS		0x6
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS	0x7

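/*
 * Each root-port BAR occupies an 8-bit field in CDNS_PCIE_LM_RC_BAR_CFG:
 * the aperture, encoded as (log2(size) - 2), sits in the low bits and the
 * BAR control type (one of the CDNS_PCIE_LM_BAR_CFG_CTRL_* values) above
 * it. The helpers below shift both into the field of the given BAR.
 */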
#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \
	(((aperture) - 2) << ((bar) * 8))

#define CDNS_PCIE_RP_BASE	0x00200000
#define CDNS_PCIE_RP_CAP_OFFSET	0xc0

/*
 * Address Translation Registers
 */
#define CDNS_PCIE_AT_BASE	0x00400000

/* Region r Outbound AXI to PCIe Address Translation Register 0 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
	(CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK	GENMASK(19, 12)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
	(((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK	GENMASK(27, 20)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
	(((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)

/* Region r Outbound AXI to PCIe Address Translation Register 1 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
	(CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)

/* Region r Outbound PCIe Descriptor Register 0 */
#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
	(CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM		0x2
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO		0x6
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0	0xa
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1	0xb

/* Bit 23 MUST be set in RC mode. */
#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID	BIT(23)
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK		GENMASK(31, 24)
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
	(((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)

/* Region r Outbound PCIe Descriptor Register 1 */
#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \
	(CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK	GENMASK(7, 0)
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
	((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)

/* Region r AXI Region Base Address Register 0 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
	(CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)

/* Region r AXI Region Base Address Register 1 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
	(CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)

/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
	(CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
	(CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)

/* AXI link down register */
#define CDNS_PCIE_AT_LINKDOWN	(CDNS_PCIE_AT_BASE + 0x0824)

#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK	GENMASK(2, 1)
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT	1
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
	(((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
	 CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)

#define CDNS_PCIE_RP_MAX_IB	0x3

#define LINK_TRAINING_ENABLE		BIT(0)
#define LINK_WAIT_MAX_RETRIES		10
#define LINK_WAIT_UDELAY_MAX		100000
#define LINK_RETRAIN_MAX_RETRIES	1000

#define PCIE_USER_CMD_STATUS_REG_OFFSET		0x4
#define PCIE_USER_LINK_STATUS_REG_OFFSET	0x14
#define PCIE_USER_LINK_STATUS_MASK		GENMASK(1, 0)

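/* Fields of the PCIe control register in the TI syscon region */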
#define CDNS_TI_PCIE_MODE_RC	BIT(7)
#define PCIE_MODE_SEL_MASK	BIT(7)
#define PCIE_GEN_SEL_MASK	GENMASK(1, 0)
#define PCIE_LINK_WIDTH_MASK	GENMASK(9, 8)

enum cdns_ti_pcie_mode {
	PCIE_MODE_RC,
	PCIE_MODE_EP,
};

enum cdns_pcie_rp_bar {
	RP_BAR_UNDEFINED = -1,
	RP_BAR0,
	RP_BAR1,
	RP_NO_BAR
};

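/*
 * Maximum value of the aperture field of each root-port BAR; used to clear
 * the aperture bits in CDNS_PCIE_LM_RC_BAR_CFG before reprogramming.
 */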
static u8 bar_aperture_mask[] = {
	[RP_BAR0] = 0x1F,
	[RP_BAR1] = 0xF,
};

enum link_status {
	NO_RECEIVERS_DETECTED,
	LINK_TRAINING_IN_PROGRESS,
	LINK_UP_DL_IN_PROGRESS,
	LINK_UP_DL_COMPLETED,
};

struct pcie_cdns_ti_data {
	enum cdns_ti_pcie_mode mode;
	unsigned int quirk_retrain_flag:1;
	unsigned int quirk_detect_quiet_flag:1;
	unsigned int max_lanes;
};

struct pcie_cdns_ti {
	struct udevice *dev;
	void __iomem *intd_cfg_base;
	void __iomem *user_cfg_base;
	void __iomem *reg_base;
	void __iomem *cfg_base;
	fdt_size_t cfg_size;
	struct regmap *syscon_base;
	struct pci_controller *host_bridge;
	u32 device_id;
	u32 max_link_speed;
	u32 num_lanes;
	u32 pcie_ctrl_offset;
	u32 vendor_id;
	u32 mode;
	unsigned int quirk_retrain_flag:1;
	unsigned int quirk_detect_quiet_flag:1;
	bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];

	/* IO, MEM & PREFETCH PCI regions */
	struct pci_region io;
	struct pci_region mem;
	struct pci_region prefetch;
};

/* Cadence PCIe Controller register access helpers */
static inline void pcie_cdns_ti_writel(struct pcie_cdns_ti *pcie, u32 reg, u32 val)
{
	writel(val, pcie->reg_base + reg);
}

static inline u32 pcie_cdns_ti_readl(struct pcie_cdns_ti *pcie, u32 reg)
{
	return readl(pcie->reg_base + reg);
}

/* Root Port register access helpers */
static inline void pcie_cdns_ti_rp_writeb(struct pcie_cdns_ti *pcie,
					  u32 reg, u8 val)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	writeb(val, addr);
}

static inline void pcie_cdns_ti_rp_writew(struct pcie_cdns_ti *pcie,
					  u32 reg, u16 val)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	writew(val, addr);
}

static inline u16 pcie_cdns_ti_rp_readw(struct pcie_cdns_ti *pcie, u32 reg)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	return readw(addr);
}

/* User register access helpers */
static inline u32 pcie_cdns_ti_user_readl(struct pcie_cdns_ti *pcie, u32 offset)
{
	return readl(pcie->user_cfg_base + offset);
}

static inline void pcie_cdns_ti_user_writel(struct pcie_cdns_ti *pcie, u32 offset,
					    u32 val)
{
	writel(val, pcie->user_cfg_base + offset);
}

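/*
 * Map a config-space access to a CPU address. Accesses to bus 0 go straight
 * to the root port's own registers (only devfn 0 exists there). Accesses to
 * downstream buses are routed through outbound region 0, which is
 * reprogrammed here for a Type 0 (bus 1) or Type 1 (bus > 1) config
 * request, and land in the "cfg" window.
 */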
void __iomem *pcie_cdns_ti_map_bus(struct pcie_cdns_ti *pcie, pci_dev_t bdf,
				   uint offset)
{
	int busnr, devnr, funcnr, devfn;
	u32 addr0, desc0;

	busnr = PCI_BUS(bdf);
	devnr = PCI_DEV(bdf);
	funcnr = PCI_FUNC(bdf);
	devfn = (devnr << 3) | funcnr;

	if (busnr == 0) {
		if (devfn)
			return NULL;
		return pcie->reg_base + (offset & 0xfff);
	}

	if (!(pcie_cdns_ti_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1))
		return NULL;

	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0);

	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busnr);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);

	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
		CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);

	if (busnr == 1)
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
	else
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;

	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0);

	return pcie->cfg_base + (offset & 0xfff);
}

static int pcie_cdns_ti_read_config(const struct udevice *bus, pci_dev_t bdf,
				    uint offset, ulong *valuep,
				    enum pci_size_t size)
{
	struct pcie_cdns_ti *pcie = dev_get_priv(bus);
	void __iomem *addr;
	ulong value;

	addr = pcie_cdns_ti_map_bus(pcie, bdf, offset & ~0x3);
	if (!addr) {
		debug("%s: bdf out of range\n", __func__);
		*valuep = pci_get_ff(size);
		return 0;
	}

	value = readl(addr);
	*valuep = pci_conv_32_to_size(value, offset, size);

	return 0;
}

static int pcie_cdns_ti_write_config(struct udevice *bus, pci_dev_t bdf,
				     uint offset, ulong value,
				     enum pci_size_t size)
{
	struct pcie_cdns_ti *pcie = dev_get_priv(bus);
	void __iomem *addr;
	ulong prev;

	addr = pcie_cdns_ti_map_bus(pcie, bdf, offset & ~0x3);
	if (!addr) {
		debug("%s: bdf out of range\n", __func__);
		return 0;
	}

	prev = readl(addr);
	value = pci_conv_size_to_32(prev, value, offset, size);
	writel(value, addr);

	return 0;
}

static int pcie_cdns_ti_ctrl_init(struct pcie_cdns_ti *pcie)
{
	struct regmap *syscon = pcie->syscon_base;
	u32 val = 0;

	if (pcie->mode == PCIE_MODE_RC)
		val = CDNS_TI_PCIE_MODE_RC;

	/* Set mode of operation */
	regmap_update_bits(syscon, pcie->pcie_ctrl_offset, PCIE_MODE_SEL_MASK,
			   val);

	/* Set link speed */
	regmap_update_bits(syscon, pcie->pcie_ctrl_offset, PCIE_GEN_SEL_MASK,
			   pcie->max_link_speed - 1);

	/* Set link width */
	regmap_update_bits(syscon, pcie->pcie_ctrl_offset, PCIE_LINK_WIDTH_MASK,
			   (pcie->num_lanes - 1) << 8);
	return 0;
}

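/*
 * Quirk for devices whose LTSSM would otherwise spend too little time in
 * the Detect.Quiet state: raise the minimum Detect.Quiet delay (0x3 here;
 * the Linux driver this was ported from documents that value as a 2 ms
 * delay) so that link detection is reliable.
 */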
static void pcie_cdns_ti_detect_quiet_quirk(struct pcie_cdns_ti *pcie)
{
	u32 delay = 0x3;
	u32 ltssm_ctrl_cap;

	ltssm_ctrl_cap = pcie_cdns_ti_readl(pcie, CDNS_PCIE_LTSSM_CTRL_CAP);
	ltssm_ctrl_cap = ((ltssm_ctrl_cap &
			 ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
			 CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));

	pcie_cdns_ti_writel(pcie, CDNS_PCIE_LTSSM_CTRL_CAP, ltssm_ctrl_cap);
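	/* Read back, presumably to ensure the write has reached the device */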
	ltssm_ctrl_cap = pcie_cdns_ti_readl(pcie, CDNS_PCIE_LTSSM_CTRL_CAP);
}

static void pcie_cdns_ti_start_user_link(struct pcie_cdns_ti *pcie)
{
	u32 reg;

	reg = pcie_cdns_ti_user_readl(pcie, PCIE_USER_CMD_STATUS_REG_OFFSET);
	reg |= LINK_TRAINING_ENABLE;
	pcie_cdns_ti_user_writel(pcie, PCIE_USER_CMD_STATUS_REG_OFFSET, reg);
}

static bool pcie_cdns_ti_user_link_up(struct pcie_cdns_ti *pcie)
{
	u32 reg;

	reg = pcie_cdns_ti_user_readl(pcie, PCIE_USER_LINK_STATUS_REG_OFFSET);
	reg &= PCIE_USER_LINK_STATUS_MASK;
	if (reg == LINK_UP_DL_COMPLETED)
		return true;

	return false;
}

static int pcie_cdns_ti_host_wait_for_link(struct pcie_cdns_ti *pcie)
{
	int retries;

	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (pcie_cdns_ti_user_link_up(pcie)) {
			dev_info(pcie->dev, "link up\n");
			return 0;
		}
		udelay(LINK_WAIT_UDELAY_MAX);
	}

	dev_err(pcie->dev, "failed to bring up link\n");
	return -ETIMEDOUT;
}

static int pcie_cdns_ti_host_training_complete(struct pcie_cdns_ti *pcie)
{
	u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
	int retries;
	u16 lnk_stat;

	/* Wait for link training to complete */
	for (retries = 0; retries < LINK_RETRAIN_MAX_RETRIES; retries++) {
		lnk_stat = pcie_cdns_ti_rp_readw(pcie, pcie_cap_off +
						 PCI_EXP_LNKSTA);
		if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
			break;
		udelay(1000);
	}

	if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
		return 0;

	return -ETIMEDOUT;
}

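/*
 * quirk_retrain_flag workaround: if the link came up at 2.5 GT/s although
 * the root port advertises a higher supported link speed, set the Retrain
 * Link bit and wait for retraining and link-up to complete.
 */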
static int pcie_cdns_ti_retrain_link(struct pcie_cdns_ti *pcie)
{
	u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
	u16 lnk_stat, lnk_ctl;
	int ret = 0;

	lnk_cap_sls = pcie_cdns_ti_readl(pcie, (CDNS_PCIE_RP_BASE +
						pcie_cap_off +
						PCI_EXP_LNKCAP));
	if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
		return ret;

	lnk_stat = pcie_cdns_ti_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
	if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
		lnk_ctl = pcie_cdns_ti_rp_readw(pcie,
						pcie_cap_off + PCI_EXP_LNKCTL);
		lnk_ctl |= PCI_EXP_LNKCTL_RL;
		pcie_cdns_ti_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL,
				       lnk_ctl);

		ret = pcie_cdns_ti_host_training_complete(pcie);
		if (ret)
			return ret;

		ret = pcie_cdns_ti_host_wait_for_link(pcie);
	}
	return ret;
}

static int pcie_cdns_ti_start_host_link(struct pcie_cdns_ti *pcie)
{
	int ret;

	ret = pcie_cdns_ti_host_wait_for_link(pcie);
	if (!ret && pcie->quirk_retrain_flag)
		ret = pcie_cdns_ti_retrain_link(pcie);

	return ret;
}

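/*
 * Program the root port: disable both RC BARs, enable the prefetchable
 * 64-bit memory and 32-bit I/O apertures, and set the vendor ID, device ID
 * and PCI-to-PCI bridge class code in the config header.
 */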
static void pcie_cdns_ti_init_root_port(struct pcie_cdns_ti *pcie)
{
	u32 val, ctrl, id;

	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	val = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
	      CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
	      CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
	      CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
	      CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
	      CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS;
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, val);

	if (pcie->vendor_id != 0xffff) {
		id = CDNS_PCIE_LM_ID_VENDOR(pcie->vendor_id) |
		     CDNS_PCIE_LM_ID_SUBSYS(pcie->vendor_id);
		pcie_cdns_ti_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	if (pcie->device_id != 0xffff)
		pcie_cdns_ti_rp_writew(pcie, PCI_DEVICE_ID, pcie->device_id);

	pcie_cdns_ti_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
	pcie_cdns_ti_rp_writeb(pcie, PCI_CLASS_PROG, 0);
	pcie_cdns_ti_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
}

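/*
 * Program outbound region r to translate a CPU address window to a PCI bus
 * address window. The window size is rounded up to a power of two of at
 * least 256 bytes, and its log2 is encoded into the low bits of the
 * region's PCI and CPU base address registers.
 */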
void pcie_cdns_ti_set_outbound_region(struct pcie_cdns_ti *pcie, u8 busnr,
				      u8 fn, u32 r, bool is_io, u64 cpu_addr,
				      u64 pci_addr, u32 size)
{
	u64 sz = 1ULL << fls64(size - 1);
	int nbits = ilog2(sz);
	u32 addr0, addr1, desc0, desc1;

	if (nbits < 8)
		nbits = 8;

	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
		(lower_32_bits(pci_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(pci_addr);

	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);

	if (is_io)
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO;
	else
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
	desc1 = 0;

	desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
		 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
	desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);

	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}

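/*
 * Program an inbound (PCIe to AXI) translation for the given root-port BAR,
 * claiming the BAR in avail_ib_bar[]. For RP_BAR0/RP_BAR1 the matching BAR
 * control and aperture bits in CDNS_PCIE_LM_RC_BAR_CFG are updated as well;
 * RP_NO_BAR only needs the address translation registers.
 */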
static int pcie_cdns_ti_bar_ib_config(struct pcie_cdns_ti *pcie,
				      enum cdns_pcie_rp_bar bar,
				      u64 cpu_addr, u64 size,
				      unsigned long flags)
{
	u32 addr0, addr1, aperture, value;

	if (!pcie->avail_ib_bar[bar])
		return -EBUSY;

	pcie->avail_ib_bar[bar] = false;

	aperture = ilog2(size);
	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), addr0);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), addr1);

	if (bar == RP_NO_BAR)
		return 0;

	value = pcie_cdns_ti_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG);
	value &= ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
		   LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
	if (size + cpu_addr >= SZ_4G) {
		if (!(flags & IORESOURCE_PREFETCH))
			value |= LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
	} else {
		if (!(flags & IORESOURCE_PREFETCH))
			value |= LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
	}

	value |= LM_RC_BAR_CFG_APERTURE(bar, aperture);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);

	return 0;
}

static int pcie_cdns_ti_map_dma_ranges(struct pcie_cdns_ti *pcie)
{
	u32 no_bar_nbits = 32;
	int ret;

	/*
	 * Assume that DMA-Ranges have not been specified.
	 * TODO: Add support for "dma-ranges".
	 */
	dev_read_u32(pcie->dev, "cdns,no-bar-match-nbits",
		     &no_bar_nbits);
	ret = pcie_cdns_ti_bar_ib_config(pcie, RP_NO_BAR, 0x0,
					 (u64)1 << no_bar_nbits, 0);
	if (ret)
		dev_err(pcie->dev, "IB BAR: %d config failed\n",
			RP_NO_BAR);
	return ret;
}

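/*
 * Set up all address translation: region 0 is reserved for config accesses
 * through the "cfg" window, one outbound region is programmed per
 * host-bridge window (I/O or memory), and inbound translation is
 * configured last.
 */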
static int pcie_cdns_ti_init_address_translation(struct pcie_cdns_ti *pcie)
{
	struct pci_controller *hb = pcie->host_bridge;
	u32 addr0, addr1, desc1, region = 1;
	u64 cpu_addr = (u64)pcie->cfg_base;
	int i, busnr = 0;

	/*
	 * Reserve region 0 for PCI configuration space accesses:
	 * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
	 * pcie_cdns_ti_map_bus(); the other region registers are set here
	 * once for all.
	 */
	addr1 = 0; /* Should be programmed to zero. */
	desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1);

	for (i = 0; i < hb->region_count; i++) {
		if (hb->regions[i].flags == PCI_REGION_IO) {
			pcie->io.phys_start = hb->regions[i].phys_start; /* IO base */
			pcie->io.bus_start = hb->regions[i].bus_start;	 /* IO bus address */
			pcie->io.size = hb->regions[i].size;		 /* IO size */

			pcie_cdns_ti_set_outbound_region(pcie, busnr, 0, region,
							 true, pcie->io.phys_start,
							 pcie->io.bus_start,
							 pcie->io.size);
		} else {
			pcie->mem.phys_start = hb->regions[i].phys_start; /* MEM base */
			pcie->mem.bus_start = hb->regions[i].bus_start;	  /* MEM bus address */
			pcie->mem.size = hb->regions[i].size;		  /* MEM size */

			pcie_cdns_ti_set_outbound_region(pcie, busnr, 0, region,
							 false, pcie->mem.phys_start,
							 pcie->mem.bus_start,
							 pcie->mem.size);
		}
		region++;
	}

	return pcie_cdns_ti_map_dma_ranges(pcie);
}

static int pcie_cdns_ti_host_init(struct pcie_cdns_ti *pcie)
{
	pcie_cdns_ti_init_root_port(pcie);

	return pcie_cdns_ti_init_address_translation(pcie);
}

static int pcie_cdns_ti_setup_host(struct pcie_cdns_ti *pcie)
{
	enum cdns_pcie_rp_bar bar;
	int ret;

	if (pcie->quirk_detect_quiet_flag)
		pcie_cdns_ti_detect_quiet_quirk(pcie);

	pcie_cdns_ti_start_user_link(pcie);

	ret = pcie_cdns_ti_start_host_link(pcie);
	if (ret)
		return ret;

	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
		pcie->avail_ib_bar[bar] = true;

	ret = pcie_cdns_ti_host_init(pcie);
	if (ret)
		return ret;

	return 0;
}

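/*
 * Probe sequence: power on the PCIe power domain, obtain the functional
 * clock ("fck") and the SERDES PHY, program mode/speed/width through the
 * syscon, pulse the optional "reset" (PERST#) GPIO, then bring up the link
 * and initialize the host.
 */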
static int pcie_cdns_ti_probe(struct udevice *dev)
{
	struct pcie_cdns_ti *pcie = dev_get_priv(dev);
	struct udevice *pci_ctlr = pci_get_controller(dev);
	struct pci_controller *host_bridge = dev_get_uclass_priv(pci_ctlr);
	const struct pcie_cdns_ti_data *data;
	struct power_domain pci_pwrdmn;
	struct gpio_desc *gpiod;
	struct phy serdes;
	struct clk *clk;
	int ret;

	pcie->dev = dev;
	pcie->host_bridge = host_bridge;

	data = (struct pcie_cdns_ti_data *)dev_get_driver_data(dev);
	if (!data)
		return -EINVAL;

	pcie->mode = data->mode;
	pcie->quirk_retrain_flag = data->quirk_retrain_flag;
	pcie->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;

	if (pcie->num_lanes > data->max_lanes) {
		dev_warn(dev, "cannot support %d lanes, defaulting to %d\n",
			 pcie->num_lanes, data->max_lanes);
		pcie->num_lanes = data->max_lanes;
	}

	ret = power_domain_get_by_index(dev, &pci_pwrdmn, 0);
	if (ret) {
		dev_err(dev, "failed to get power domain\n");
		return ret;
	}

	ret = power_domain_on(&pci_pwrdmn);
	if (ret) {
		dev_err(dev, "failed to power on\n");
		return ret;
	}

	clk = devm_clk_get(dev, "fck");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(dev, "failed to get functional clock\n");
		return ret;
	}

	ret = generic_phy_get_by_name(dev, "pcie-phy", &serdes);
	if (ret) {
		dev_err(dev, "unable to get serdes\n");
		return ret;
	}
	generic_phy_reset(&serdes);
	generic_phy_init(&serdes);
	generic_phy_power_on(&serdes);

	ret = pcie_cdns_ti_ctrl_init(pcie);
	if (ret)
		return ret;

	gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_IS_OUT);
	if (IS_ERR(gpiod)) {
		ret = PTR_ERR(gpiod);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get reset GPIO\n");
		return ret;
	}

	if (gpiod) {
		ret = dm_gpio_set_value(gpiod, 0);
		if (ret)
			return ret;
		udelay(200);
		ret = dm_gpio_set_value(gpiod, 1);
		if (ret)
			return ret;
	}

	ret = pcie_cdns_ti_setup_host(pcie);
	if (ret)
		return ret;

	return 0;
}

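/*
 * Parse the controller's DT node: the "intd_cfg", "user_cfg", "reg" and
 * "cfg" register regions, optional vendor-id/device-id overrides, the
 * required num-lanes and max-link-speed properties, and the
 * ti,syscon-pcie-ctrl phandle plus register offset.
 *
 * For illustration only, a hypothetical node this parser would accept
 * (addresses, offsets and phandles are made up, not from any real board):
 *
 *	pcie0_rc: pcie@2900000 {
 *		compatible = "ti,j7200-pcie-host";
 *		reg = <0x00 0x02900000 0x00 0x1000>,
 *		      <0x00 0x02907000 0x00 0x400>,
 *		      <0x00 0x0d000000 0x00 0x00800000>,
 *		      <0x00 0x10000000 0x00 0x00001000>;
 *		reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
 *		ti,syscon-pcie-ctrl = <&scm_conf 0x4070>;
 *		max-link-speed = <3>;
 *		num-lanes = <2>;
 *		power-domains = <&k3_pds 240 TI_SCI_PD_EXCLUSIVE>;
 *		clocks = <&k3_clks 240 6>;
 *		clock-names = "fck";
 *		phys = <&serdes0_pcie_link>;
 *		phy-names = "pcie-phy";
 *		reset-gpios = <&exp1 6 GPIO_ACTIVE_HIGH>;
 *	};
 */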
static int pcie_cdns_ti_of_to_plat(struct udevice *dev)
{
	struct pcie_cdns_ti *pcie = dev_get_priv(dev);
	struct regmap *syscon;
	u32 offset;
	int ret;

	pcie->intd_cfg_base = dev_remap_addr_name(dev, "intd_cfg");
	if (!pcie->intd_cfg_base)
		return -EINVAL;

	pcie->user_cfg_base = dev_remap_addr_name(dev, "user_cfg");
	if (!pcie->user_cfg_base)
		return -EINVAL;

	pcie->reg_base = dev_remap_addr_name(dev, "reg");
	if (!pcie->reg_base)
		return -EINVAL;

	pcie->cfg_base = dev_remap_addr_name(dev, "cfg");
	if (!pcie->cfg_base)
		return -EINVAL;

	pcie->vendor_id = 0xffff;
	pcie->device_id = 0xffff;
	dev_read_u32(dev, "vendor-id", &pcie->vendor_id);
	dev_read_u32(dev, "device-id", &pcie->device_id);

	ret = dev_read_u32(dev, "num-lanes", &pcie->num_lanes);
	if (ret)
		return ret;

	ret = dev_read_u32(dev, "max-link-speed", &pcie->max_link_speed);
	if (ret)
		return ret;

	syscon = syscon_regmap_lookup_by_phandle(dev, "ti,syscon-pcie-ctrl");
	if (IS_ERR(syscon)) {
		if (PTR_ERR(syscon) == -ENODEV)
			return 0;
		return PTR_ERR(syscon);
	}

	ret = dev_read_u32_index(dev, "ti,syscon-pcie-ctrl", 1, &offset);
	if (ret)
		return ret;

	pcie->syscon_base = syscon;
	pcie->pcie_ctrl_offset = offset;

	return 0;
}

825
826static const struct dm_pci_ops pcie_cdns_ti_ops = {
827 .read_config = pcie_cdns_ti_read_config,
828 .write_config = pcie_cdns_ti_write_config,
829};
830
831static const struct pcie_cdns_ti_data j7200_pcie_rc_data = {
832 .mode = PCIE_MODE_RC,
833 .quirk_detect_quiet_flag = true,
834 .max_lanes = 2,
835};
836
837static const struct udevice_id pcie_cdns_ti_ids[] = {
838 {
839 .compatible = "ti,j7200-pcie-host",
840 .data = (ulong)&j7200_pcie_rc_data,
841 },
842 {},
843};
844
845U_BOOT_DRIVER(pcie_cdns_ti) = {
846 .name = "pcie_cdns_ti",
847 .id = UCLASS_PCI,
848 .of_match = pcie_cdns_ti_ids,
849 .ops = &pcie_cdns_ti_ops,
850 .of_to_plat = pcie_cdns_ti_of_to_plat,
851 .probe = pcie_cdns_ti_probe,
852 .priv_auto = sizeof(struct pcie_cdns_ti),
853};