blob: 9d1d123a18cbe45cfcad21177ea069f3bb6449a7 [file] [log] [blame]
Siddharth Vadapalli445dbb62024-10-14 11:09:23 +05301// SPDX-License-Identifier: GPL-2.0-only OR MIT
2/*
3 * Copyright (C) 2024 Texas Instruments Incorporated - https://www.ti.com
4 *
5 * PCIe controller driver for TI's K3 SoCs with Cadence PCIe controller
6 *
7 * Ported from the Linux driver - drivers/pci/controller/cadence/pci-j721e.c
8 *
9 * Author: Siddharth Vadapalli <s-vadapalli@ti.com>
10 *
11 */
12
13#include <asm/gpio.h>
14#include <clk-uclass.h>
15#include <dm.h>
16#include <dm/device_compat.h>
17#include <generic-phy.h>
18#include <linux/delay.h>
19#include <linux/io.h>
20#include <linux/ioport.h>
21#include <linux/log2.h>
Hrushikesh Salunke9418a492025-04-16 17:38:28 +053022#include <linux/sizes.h>
Siddharth Vadapalli445dbb62024-10-14 11:09:23 +053023#include <power-domain.h>
24#include <regmap.h>
25#include <syscon.h>
26
/* Local Management (LM) register bank of the Cadence PCIe core */
#define CDNS_PCIE_LM_BASE	0x00100000
#define CDNS_PCIE_LM_ID		(CDNS_PCIE_LM_BASE + 0x0044)
#define CDNS_PCIE_LTSSM_CTRL_CAP	(CDNS_PCIE_LM_BASE + 0x0054)
#define CDNS_PCIE_LM_RC_BAR_CFG	(CDNS_PCIE_LM_BASE + 0x0300)

/* Vendor/Subsystem-Vendor ID fields of the LM ID register */
#define CDNS_PCIE_LM_ID_VENDOR_MASK	GENMASK(15, 0)
#define CDNS_PCIE_LM_ID_VENDOR_SHIFT	0
#define CDNS_PCIE_LM_ID_VENDOR(vid) \
	(((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
#define CDNS_PCIE_LM_ID_SUBSYS_MASK	GENMASK(31, 16)
#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT	16
#define CDNS_PCIE_LM_ID_SUBSYS(sub) \
	(((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)

/* Root Complex BAR configuration register fields */
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK	GENMASK(8, 6)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
	(((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)

#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK	GENMASK(16, 14)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
	(((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)

#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE	BIT(17)
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS	BIT(18)
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE		BIT(19)
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS		BIT(20)

/* 3-bit BAR control encodings used in the RC BAR CFG register */
#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED		0x0
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS		0x4
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS	0x5
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS		0x6
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS	0x7

/*
 * Per-BAR helpers: each RP BAR occupies an 8-bit field in RC_BAR_CFG
 * (aperture in bits [5:0] of the field, control in bits [8:6]).
 */
#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \
	(((aperture) - 2) << ((bar) * 8))

/* Root Port (RP) configuration-space register bank */
#define CDNS_PCIE_RP_BASE	0x00200000
#define CDNS_PCIE_RP_CAP_OFFSET	0xc0

/*
 * Address Translation Registers
 */
#define CDNS_PCIE_AT_BASE	0x00400000

/* Region r Outbound AXI to PCIe Address Translation Register 0 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
	(CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK	GENMASK(19, 12)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
	(((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK	GENMASK(27, 20)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
	(((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)

/* Region r Outbound AXI to PCIe Address Translation Register 1 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
	(CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)

/* Region r Outbound PCIe Descriptor Register 0 */
#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
	(CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM		0x2
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO		0x6
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0	0xa
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1	0xb

/* Bit 23 MUST be set in RC mode. */
#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID	BIT(23)
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK		GENMASK(31, 24)
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
	(((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)

/* Region r Outbound PCIe Descriptor Register 1 */
#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \
	(CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK	GENMASK(7, 0)
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
	((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)

/* Region r AXI Region Base Address Register 0 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
	(CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)

/* Region r AXI Region Base Address Register 1 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
	(CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)

/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
	(CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
	(CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)

/* AXI link down register */
#define CDNS_PCIE_AT_LINKDOWN	(CDNS_PCIE_AT_BASE + 0x0824)

/* "Detect Quiet" minimum delay field in LTSSM_CTRL_CAP (quirk handling) */
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK	GENMASK(2, 1)
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT	1
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
	(((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
	 CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)

/* Number of inbound BAR translation slots on the Root Port */
#define CDNS_PCIE_RP_MAX_IB	0x3

/* Link-training poll parameters */
#define LINK_TRAINING_ENABLE	BIT(0)
#define LINK_WAIT_MAX_RETRIES	10
#define LINK_WAIT_UDELAY_MAX	100000
#define LINK_RETRAIN_MAX_RETRIES	1000

/* TI "user config" wrapper registers around the Cadence core */
#define PCIE_USER_CMD_STATUS_REG_OFFSET	0x4
#define PCIE_USER_LINK_STATUS_REG_OFFSET	0x14
#define PCIE_USER_LINK_STATUS_MASK	GENMASK(1, 0)

/* Fields in the SoC syscon PCIe control register */
#define CDNS_TI_PCIE_MODE_RC	BIT(7)
#define PCIE_MODE_SEL_MASK	BIT(7)
#define PCIE_GEN_SEL_MASK	GENMASK(1, 0)
#define PCIE_LINK_WIDTH_MASK	GENMASK(9, 8)
161
/* Operating mode of the controller: Root Complex or Endpoint */
enum cdns_ti_pcie_mode {
	PCIE_MODE_RC,
	PCIE_MODE_EP,
};

/* Root Port inbound BAR slots; RP_NO_BAR is the "no BAR match" region */
enum cdns_pcie_rp_bar {
	RP_BAR_UNDEFINED = -1,
	RP_BAR0,
	RP_BAR1,
	RP_NO_BAR
};

/* Maximum aperture encoding supported by each Root Port BAR */
static u8 bar_aperture_mask[] = {
	[RP_BAR0] = 0x1F,
	[RP_BAR1] = 0xF,
};

/* Link state encodings read from PCIE_USER_LINK_STATUS_REG_OFFSET[1:0] */
enum link_status {
	NO_RECEIVERS_DETECTED,
	LINK_TRAINING_IN_PROGRESS,
	LINK_UP_DL_IN_PROGRESS,
	LINK_UP_DL_COMPLETED,
};
185
/* Per-compatible match data: mode, quirks and lane limit for the SoC */
struct pcie_cdns_ti_data {
	enum cdns_ti_pcie_mode mode;
	unsigned int quirk_retrain_flag:1;
	unsigned int quirk_detect_quiet_flag:1;
	unsigned int max_lanes;
};

/* Driver-private state, filled from DT in of_to_plat() and used in probe() */
struct pcie_cdns_ti {
	struct udevice *dev;
	void __iomem *intd_cfg_base;	/* "intd_cfg" region (interrupt config) */
	void __iomem *user_cfg_base;	/* TI wrapper "user_cfg" registers */
	void __iomem *reg_base;		/* Cadence core register space */
	void __iomem *cfg_base;		/* window for downstream config accesses */
	fdt_size_t cfg_size;
	struct regmap *syscon_base;	/* syscon holding the PCIe ctrl register */
	struct pci_controller *host_bridge;
	u32 device_id;			/* from DT "device-id", 0xffff = unset */
	u32 max_link_speed;		/* from DT "max-link-speed" (gen number) */
	u32 num_lanes;			/* from DT "num-lanes" */
	u32 pcie_ctrl_offset;		/* offset into the syscon regmap */
	u32 vendor_id;			/* from DT "vendor-id", 0xffff = unset */
	u32 mode;			/* PCIE_MODE_RC or PCIE_MODE_EP */
	unsigned int quirk_retrain_flag:1;
	unsigned int quirk_detect_quiet_flag:1;
	bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];	/* free inbound BAR slots */

	/* IO, MEM & PREFETCH PCI regions */
	struct pci_region io;
	struct pci_region mem;
	struct pci_region prefetch;
};
217
/* Cadence PCIe Controller register access helpers */

/* 32-bit write to the Cadence core register space */
static inline void pcie_cdns_ti_writel(struct pcie_cdns_ti *pcie, u32 reg, u32 val)
{
	writel(val, pcie->reg_base + reg);
}

/* 32-bit read from the Cadence core register space */
static inline u32 pcie_cdns_ti_readl(struct pcie_cdns_ti *pcie, u32 reg)
{
	return readl(pcie->reg_base + reg);
}

/* Root Port register access helpers (offsets relative to CDNS_PCIE_RP_BASE) */
static inline void pcie_cdns_ti_rp_writeb(struct pcie_cdns_ti *pcie,
					  u32 reg, u8 val)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	writeb(val, addr);
}

static inline void pcie_cdns_ti_rp_writew(struct pcie_cdns_ti *pcie,
					  u32 reg, u16 val)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	writew(val, addr);
}

static inline u16 pcie_cdns_ti_rp_readw(struct pcie_cdns_ti *pcie, u32 reg)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	return readw(addr);
}

/* User register access helpers (TI wrapper "user_cfg" region) */
static inline u32 pcie_cdns_ti_user_readl(struct pcie_cdns_ti *pcie, u32 offset)
{
	return readl(pcie->user_cfg_base + offset);
}

static inline void pcie_cdns_ti_user_writel(struct pcie_cdns_ti *pcie, u32 offset,
					    u32 val)
{
	writel(val, pcie->user_cfg_base + offset);
}
264
/*
 * Map a (bus, device, function, offset) tuple to a CPU address for a config
 * space access.
 *
 * Bus 0 accesses target the Root Port's own registers directly (only
 * devfn 0 exists there).  For downstream buses, outbound ATU region 0 is
 * reprogrammed on every access to carry the target bus/devfn, and the
 * access goes through the "cfg" window.  Returns NULL when the target is
 * unreachable (non-zero devfn on bus 0, or link down).
 */
void __iomem *pcie_cdns_ti_map_bus(struct pcie_cdns_ti *pcie, pci_dev_t bdf,
				   uint offset)
{
	int busnr, devnr, funcnr, devfn;
	u32 addr0, desc0;

	busnr = PCI_BUS(bdf);
	devnr = PCI_DEV(bdf);
	funcnr = PCI_FUNC(bdf);
	devfn = (devnr << 3) | funcnr;

	if (busnr == 0) {
		/* Root Port config space; only device 0 function 0 exists */
		if (devfn)
			return NULL;
		return pcie->reg_base + (offset & 0xfff);
	}

	/*
	 * Bit 0 of the register at CDNS_PCIE_LM_BASE gates downstream
	 * accesses; bail out when it is clear (link not usable).
	 */
	if (!(pcie_cdns_ti_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1))
		return NULL;

	/* Clear any latched AXI link-down status before the access */
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0);

	/* Retarget outbound region 0 at the requested bus/devfn (4K window) */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busnr);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);

	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
		CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);

	/* Type 0 config TLPs for the immediate secondary bus, else Type 1 */
	if (busnr == 1)
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
	else
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;

	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0);

	return pcie->cfg_base + (offset & 0xfff);
}
304
305static int pcie_cdns_ti_read_config(const struct udevice *bus, pci_dev_t bdf,
306 uint offset, ulong *valuep,
307 enum pci_size_t size)
308{
309 struct pcie_cdns_ti *pcie = dev_get_priv(bus);
310 void __iomem *addr;
311 ulong value;
312
313 addr = pcie_cdns_ti_map_bus(pcie, bdf, offset & ~0x3);
314 if (!addr) {
315 debug("%s: bdf out of range\n", __func__);
316 *valuep = pci_get_ff(size);
317 return 0;
318 }
319
320 value = readl(addr);
321 *valuep = pci_conv_32_to_size(value, offset, size);
322
323 return 0;
324}
325
326static int pcie_cdns_ti_write_config(struct udevice *bus, pci_dev_t bdf,
327 uint offset, ulong value,
328 enum pci_size_t size)
329{
330 struct pcie_cdns_ti *pcie = dev_get_priv(bus);
331 void __iomem *addr;
332 ulong prev;
333
334 addr = pcie_cdns_ti_map_bus(pcie, bdf, offset & ~0x3);
335 if (!addr) {
336 debug("%s: bdf out of range\n", __func__);
337 return 0;
338 }
339
340 prev = readl(addr);
341 value = pci_conv_size_to_32(prev, value, offset, size);
342 writel(value, addr);
343
344 return 0;
345}
346
/*
 * Program the SoC syscon PCIe control register: operating mode (RC/EP),
 * link generation and link width.  Always returns 0.
 *
 * NOTE(review): pcie->syscon_base may be NULL when of_to_plat() got
 * -ENODEV from the syscon lookup — confirm that path cannot reach here
 * on supported boards.
 */
static int pcie_cdns_ti_ctrl_init(struct pcie_cdns_ti *pcie)
{
	struct regmap *syscon = pcie->syscon_base;
	u32 val = 0;

	if (pcie->mode == PCIE_MODE_RC)
		val = CDNS_TI_PCIE_MODE_RC;

	/* Set mode of operation */
	regmap_update_bits(syscon, pcie->pcie_ctrl_offset, PCIE_MODE_SEL_MASK,
			   val);

	/* Set link speed: GEN_SEL is 0-based (0 = Gen1) */
	regmap_update_bits(syscon, pcie->pcie_ctrl_offset, PCIE_GEN_SEL_MASK,
			   pcie->max_link_speed - 1);

	/* Set link width: field holds (lanes - 1) in bits [9:8] */
	regmap_update_bits(syscon, pcie->pcie_ctrl_offset, PCIE_LINK_WIDTH_MASK,
			   (pcie->num_lanes - 1) << 8);
	return 0;
}
368
/*
 * Quirk: program the "Detect Quiet" minimum delay field in the LTSSM
 * control/capability register to value 3.  Applied when the match data
 * sets quirk_detect_quiet_flag.
 */
static void pcie_cdns_ti_detect_quiet_quirk(struct pcie_cdns_ti *pcie)
{
	u32 delay = 0x3;
	u32 ltssm_ctrl_cap;

	ltssm_ctrl_cap = pcie_cdns_ti_readl(pcie, CDNS_PCIE_LTSSM_CTRL_CAP);
	ltssm_ctrl_cap = ((ltssm_ctrl_cap &
			 ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
			 CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));

	pcie_cdns_ti_writel(pcie, CDNS_PCIE_LTSSM_CTRL_CAP, ltssm_ctrl_cap);
	/* Read back, presumably to flush the posted write; value discarded */
	ltssm_ctrl_cap = pcie_cdns_ti_readl(pcie, CDNS_PCIE_LTSSM_CTRL_CAP);
}
382
383static void pcie_cdns_ti_start_user_link(struct pcie_cdns_ti *pcie)
384{
385 u32 reg;
386
387 reg = pcie_cdns_ti_user_readl(pcie, PCIE_USER_CMD_STATUS_REG_OFFSET);
388 reg |= LINK_TRAINING_ENABLE;
389 pcie_cdns_ti_user_writel(pcie, PCIE_USER_CMD_STATUS_REG_OFFSET, reg);
390}
391
392static bool pcie_cdns_ti_user_link_up(struct pcie_cdns_ti *pcie)
393{
394 u32 reg;
395
396 reg = pcie_cdns_ti_user_readl(pcie, PCIE_USER_LINK_STATUS_REG_OFFSET);
397 reg &= PCIE_USER_LINK_STATUS_MASK;
398 if (reg == LINK_UP_DL_COMPLETED)
399 return true;
400
401 return false;
402}
403
404static int pcie_cdns_ti_host_wait_for_link(struct pcie_cdns_ti *pcie)
405{
406 int retries;
407
408 for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
409 if (pcie_cdns_ti_user_link_up(pcie)) {
410 dev_info(pcie->dev, "link up\n");
411 return 0;
412 }
413 udelay(LINK_WAIT_UDELAY_MAX);
414 }
415
416 dev_err(pcie->dev, "failed to bring up link\n");
417 return -ETIMEDOUT;
418}
419
420static int pcie_cdns_ti_host_training_complete(struct pcie_cdns_ti *pcie)
421{
422 u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
423 int retries;
424 u16 lnk_stat;
425
426 /* Wait for link training to complete */
427 for (retries = 0; retries < LINK_RETRAIN_MAX_RETRIES; retries++) {
428 lnk_stat = pcie_cdns_ti_rp_readw(pcie, pcie_cap_off +
429 PCI_EXP_LNKSTA);
430 if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
431 break;
432 udelay(1000);
433 }
434
435 if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
436 return 0;
437
438 return -ETIMEDOUT;
439}
440
/*
 * Quirk (quirk_retrain_flag): if the port is capable of more than
 * 2.5 GT/s but the link came up at 2.5 GT/s, set the Retrain Link bit
 * in Link Control and wait for training to complete and the link to
 * come back up.  Returns 0 on success or when no retrain is needed.
 */
static int pcie_cdns_ti_retrain_link(struct pcie_cdns_ti *pcie)
{
	u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
	u16 lnk_stat, lnk_ctl;
	int ret = 0;

	/* Nothing to do if the port only supports 2.5 GT/s anyway */
	lnk_cap_sls = pcie_cdns_ti_readl(pcie, (CDNS_PCIE_RP_BASE +
						pcie_cap_off +
						PCI_EXP_LNKCAP));
	if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
		return ret;

	lnk_stat = pcie_cdns_ti_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
	if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
		/* Request a retrain via the Link Control register */
		lnk_ctl = pcie_cdns_ti_rp_readw(pcie,
						pcie_cap_off + PCI_EXP_LNKCTL);
		lnk_ctl |= PCI_EXP_LNKCTL_RL;
		pcie_cdns_ti_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL,
				       lnk_ctl);

		ret = pcie_cdns_ti_host_training_complete(pcie);
		if (ret)
			return ret;

		ret = pcie_cdns_ti_host_wait_for_link(pcie);
	}
	return ret;
}
469
470static int pcie_cdns_ti_start_host_link(struct pcie_cdns_ti *pcie)
471{
472 int ret;
473
474 ret = pcie_cdns_ti_host_wait_for_link(pcie);
475 if (!ret && pcie->quirk_retrain_flag)
476 ret = pcie_cdns_ti_retrain_link(pcie);
477
478 return ret;
479}
480
/*
 * Initialize the Root Port: disable BAR0/BAR1, enable 64-bit prefetchable
 * memory and 32-bit I/O, program vendor/device IDs from DT (when set) and
 * the PCI-to-PCI bridge class code.
 */
static void pcie_cdns_ti_init_root_port(struct pcie_cdns_ti *pcie)
{
	u32 val, ctrl, id;

	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	val = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
	      CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
	      CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
	      CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
	      CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
	      CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS;
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, val);

	/* Vendor ID doubles as the subsystem vendor ID */
	if (pcie->vendor_id != 0xffff) {
		id = CDNS_PCIE_LM_ID_VENDOR(pcie->vendor_id) |
		     CDNS_PCIE_LM_ID_SUBSYS(pcie->vendor_id);
		pcie_cdns_ti_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	if (pcie->device_id != 0xffff)
		pcie_cdns_ti_rp_writew(pcie, PCI_DEVICE_ID, pcie->device_id);

	/* Advertise as a standard PCI-to-PCI bridge */
	pcie_cdns_ti_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
	pcie_cdns_ti_rp_writeb(pcie, PCI_CLASS_PROG, 0);
	pcie_cdns_ti_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
}
507
/*
 * Program outbound ATU region @r to translate a CPU window at @cpu_addr
 * into PCI address @pci_addr of @size bytes, as MEM or IO TLPs.
 *
 * @busnr: bus number placed in the region descriptor
 * @fn:    function number (currently unused by the register programming)
 * The window is rounded up to a power of two, minimum 256 bytes (nbits 8).
 */
void pcie_cdns_ti_set_outbound_region(struct pcie_cdns_ti *pcie, u8 busnr,
				      u8 fn, u32 r, bool is_io, u64 cpu_addr,
				      u64 pci_addr, u32 size)
{
	/* Round the window up to the next power of two */
	u64 sz = 1ULL << fls64(size - 1);
	int nbits = ilog2(sz);
	u32 addr0, addr1, desc0, desc1;

	/* Hardware minimum window is 2^8 bytes */
	if (nbits < 8)
		nbits = 8;

	/* PCI-side base address and window size */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
		(lower_32_bits(pci_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(pci_addr);

	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);

	/* TLP type for this region */
	if (is_io)
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO;
	else
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
	desc1 = 0;

	desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
		 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
	desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);

	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);

	/* CPU-side (AXI) base address of the window */
	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
546
/*
 * Configure inbound translation for Root Port BAR @bar: PCIe accesses
 * matching the BAR map to CPU address @cpu_addr with a 2^ilog2(size)
 * aperture.  Marks the BAR slot as used; returns -EBUSY if already taken.
 * For RP_NO_BAR only the address registers are programmed (no BAR CFG
 * field exists for it).
 */
static int pcie_cdns_ti_bar_ib_config(struct pcie_cdns_ti *pcie,
				      enum cdns_pcie_rp_bar bar,
				      u64 cpu_addr, u64 size,
				      unsigned long flags)
{
	u32 addr0, addr1, aperture, value;

	if (!pcie->avail_ib_bar[bar])
		return -EBUSY;

	pcie->avail_ib_bar[bar] = false;

	aperture = ilog2(size);
	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), addr0);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), addr1);

	if (bar == RP_NO_BAR)
		return 0;

	/* Clear this BAR's control and aperture fields in RC_BAR_CFG */
	value = pcie_cdns_ti_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG);
	value &= ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
		   LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
	/* Windows reaching beyond 4 GiB need the 64-bit BAR encodings */
	if (size + cpu_addr >= SZ_4G) {
		if (!(flags & IORESOURCE_PREFETCH))
			value |= LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
	} else {
		if (!(flags & IORESOURCE_PREFETCH))
			value |= LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
	}

	value |= LM_RC_BAR_CFG_APERTURE(bar, aperture);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);

	return 0;
}
590
/*
 * Set up the "no BAR match" inbound region so all upstream (DMA) traffic
 * maps 1:1 to CPU address space.  The window width defaults to 32 bits
 * and may be widened via the optional DT property
 * "cdns,no-bar-match-nbits" (dev_read_u32() failure intentionally keeps
 * the default).
 */
static int pcie_cdns_ti_map_dma_ranges(struct pcie_cdns_ti *pcie)
{
	u32 no_bar_nbits = 32;
	int ret;

	/*
	 * Assume that DMA-Ranges have not been specified.
	 * TODO: Add support for "dma-ranges".
	 */
	dev_read_u32(pcie->dev, "cdns,no-bar-match-nbits",
		     &no_bar_nbits);
	ret = pcie_cdns_ti_bar_ib_config(pcie, RP_NO_BAR, 0x0,
					 (u64)1 << no_bar_nbits, 0);
	if (ret)
		dev_err(pcie->dev, "IB BAR: %d config failed\n",
			RP_NO_BAR);
	return ret;
}
609
/*
 * Program the outbound ATU: region 0 is reserved for config accesses
 * (its CPU side points at the "cfg" window; PCI side is reprogrammed per
 * access by pcie_cdns_ti_map_bus()), then one region per host-bridge
 * window starting at region 1.  Finishes by mapping inbound DMA ranges.
 *
 * NOTE(review): every non-IO region (including prefetchable ones) is
 * programmed as MEM and stored in pcie->mem — pcie->prefetch is never
 * filled in here; confirm that is intended.
 */
static int pcie_cdns_ti_init_address_translation(struct pcie_cdns_ti *pcie)
{
	struct pci_controller *hb = pcie->host_bridge;
	u32 addr0, addr1, desc1, region = 1;
	u64 cpu_addr = (u64)pcie->cfg_base;
	int i, busnr = 0;

	/*
	 * Reserve region 0 for PCI configure space accesses:
	 * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
	 * cdns_pci_map_bus(), other region registers are set here once for all.
	 */
	addr1 = 0; /* Should be programmed to zero. */
	desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);

	/* CPU side of region 0: the 4K config window */
	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0);
	pcie_cdns_ti_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1);

	for (i = 0; i < hb->region_count; i++) {
		if (hb->regions[i].flags == PCI_REGION_IO) {
			pcie->io.phys_start = hb->regions[i].phys_start; /* IO base */
			pcie->io.bus_start = hb->regions[i].bus_start;  /* IO_bus_addr */
			pcie->io.size = hb->regions[i].size;	    /* IO size */

			pcie_cdns_ti_set_outbound_region(pcie, busnr, 0, region,
							 true, pcie->io.phys_start,
							 pcie->io.bus_start,
							 pcie->io.size);
		} else {
			pcie->mem.phys_start = hb->regions[i].phys_start; /* MEM base */
			pcie->mem.bus_start = hb->regions[i].bus_start;  /* MEM_bus_addr */
			pcie->mem.size = hb->regions[i].size;	    /* MEM size */

			pcie_cdns_ti_set_outbound_region(pcie, busnr, 0, region,
							 false, pcie->mem.phys_start,
							 pcie->mem.bus_start,
							 pcie->mem.size);
		}
		region++;
	}

	return pcie_cdns_ti_map_dma_ranges(pcie);
}
658
/* Host bring-up: program the Root Port, then the address translation. */
static int pcie_cdns_ti_host_init(struct pcie_cdns_ti *pcie)
{
	pcie_cdns_ti_init_root_port(pcie);

	return pcie_cdns_ti_init_address_translation(pcie);
}
665
/*
 * Full host setup sequence: apply the detect-quiet quirk when required,
 * enable link training, wait for the link, mark all inbound BAR slots
 * free, then initialize the Root Port and address translation.
 * Returns 0 on success or a negative error code.
 */
static int pcie_cdns_ti_setup_host(struct pcie_cdns_ti *pcie)
{
	enum cdns_pcie_rp_bar bar;
	int ret;

	if (pcie->quirk_detect_quiet_flag)
		pcie_cdns_ti_detect_quiet_quirk(pcie);

	pcie_cdns_ti_start_user_link(pcie);

	ret = pcie_cdns_ti_start_host_link(pcie);
	if (ret)
		return ret;

	/* All CDNS_PCIE_RP_MAX_IB slots (RP_BAR0..RP_NO_BAR) start free */
	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
		pcie->avail_ib_bar[bar] = true;

	ret = pcie_cdns_ti_host_init(pcie);
	if (ret)
		return ret;

	return 0;
}
689
690static int pcie_cdns_ti_probe(struct udevice *dev)
691{
692 struct pcie_cdns_ti *pcie = dev_get_priv(dev);
693 struct udevice *pci_ctlr = pci_get_controller(dev);
694 struct pci_controller *host_bridge = dev_get_uclass_priv(pci_ctlr);
695 const struct pcie_cdns_ti_data *data;
696 struct power_domain pci_pwrdmn;
697 struct gpio_desc *gpiod;
698 struct phy serdes;
699 struct clk *clk;
700 int ret;
701
702 pcie->dev = dev;
703 pcie->host_bridge = host_bridge;
704
705 data = (struct pcie_cdns_ti_data *)dev_get_driver_data(dev);
706 if (!data)
707 return -EINVAL;
708
709 pcie->mode = data->mode;
710 pcie->quirk_retrain_flag = data->quirk_retrain_flag;
711 pcie->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
712
713 if (pcie->num_lanes > data->max_lanes) {
714 dev_warn(dev, "cannot support %d lanes, defaulting to %d\n",
715 pcie->num_lanes, data->max_lanes);
716 pcie->num_lanes = data->max_lanes;
717 }
718
719 ret = power_domain_get_by_index(dev, &pci_pwrdmn, 0);
720 if (ret) {
721 dev_err(dev, "failed to get power domain\n");
722 return ret;
723 }
724
725 ret = power_domain_on(&pci_pwrdmn);
726 if (ret) {
727 dev_err(dev, "failed to power on\n");
728 return ret;
729 }
730
731 clk = devm_clk_get(dev, "fck");
732 if (IS_ERR(clk)) {
733 ret = PTR_ERR(clk);
734 dev_err(dev, "failed to get functional clock\n");
735 return ret;
736 }
737
738 ret = generic_phy_get_by_name(dev, "pcie-phy", &serdes);
739 if (ret) {
740 dev_err(dev, "unable to get serdes");
741 return ret;
742 }
743 generic_phy_reset(&serdes);
744 generic_phy_init(&serdes);
745 generic_phy_power_on(&serdes);
746
747 ret = pcie_cdns_ti_ctrl_init(pcie);
748 if (ret)
749 return ret;
750
751 gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_IS_OUT);
752 if (IS_ERR(gpiod)) {
753 ret = PTR_ERR(gpiod);
754 if (ret != -EPROBE_DEFER)
755 dev_err(dev, "Failed to get reset GPIO\n");
756 return ret;
757 }
758
759 if (gpiod) {
760 ret = dm_gpio_set_value(gpiod, 0);
761 udelay(200);
762 ret = dm_gpio_set_value(gpiod, 1);
763 if (ret)
764 return ret;
765 }
766
767 ret = pcie_cdns_ti_setup_host(pcie);
768 if (ret)
769 return ret;
770
771 return 0;
772}
773
/*
 * Parse device-tree properties into the driver-private state: the four
 * named register regions, optional vendor/device IDs (defaulting to
 * 0xffff = "unset"), required lane count and link speed, and the syscon
 * phandle + offset that selects the PCIe control register.
 *
 * NOTE(review): when the syscon lookup returns -ENODEV this function
 * returns 0 with syscon_base left NULL — confirm probe() cannot then
 * dereference it in pcie_cdns_ti_ctrl_init().
 */
static int pcie_cdns_ti_of_to_plat(struct udevice *dev)
{
	struct pcie_cdns_ti *pcie = dev_get_priv(dev);
	struct regmap *syscon;
	u32 offset;
	int ret;

	pcie->intd_cfg_base = dev_remap_addr_name(dev, "intd_cfg");
	if (!pcie->intd_cfg_base)
		return -EINVAL;

	pcie->user_cfg_base = dev_remap_addr_name(dev, "user_cfg");
	if (!pcie->user_cfg_base)
		return -EINVAL;

	pcie->reg_base = dev_remap_addr_name(dev, "reg");
	if (!pcie->reg_base)
		return -EINVAL;

	pcie->cfg_base = dev_remap_addr_name(dev, "cfg");
	if (!pcie->cfg_base)
		return -EINVAL;

	/* IDs are optional; 0xffff means "leave hardware defaults" */
	pcie->vendor_id = 0xffff;
	pcie->device_id = 0xffff;
	dev_read_u32(dev, "vendor-id", &pcie->vendor_id);
	dev_read_u32(dev, "device-id", &pcie->device_id);

	ret = dev_read_u32(dev, "num-lanes", &pcie->num_lanes);
	if (ret)
		return ret;

	ret = dev_read_u32(dev, "max-link-speed", &pcie->max_link_speed);
	if (ret)
		return ret;

	syscon = syscon_regmap_lookup_by_phandle(dev, "ti,syscon-pcie-ctrl");
	if (IS_ERR(syscon)) {
		/* Treat a missing syscon node as non-fatal */
		if (PTR_ERR(syscon) == -ENODEV)
			return 0;
		return PTR_ERR(syscon);
	}

	/* Second cell of the phandle arg is the register offset */
	ret = dev_read_u32_index(dev, "ti,syscon-pcie-ctrl", 1, &offset);
	if (ret)
		return ret;

	pcie->syscon_base = syscon;
	pcie->pcie_ctrl_offset = offset;

	return 0;
}
826
/* DM PCI bus operations: config-space accessors only */
static const struct dm_pci_ops pcie_cdns_ti_ops = {
	.read_config = pcie_cdns_ti_read_config,
	.write_config = pcie_cdns_ti_write_config,
};

/* J7200: RC mode, detect-quiet quirk, up to 2 lanes */
static const struct pcie_cdns_ti_data j7200_pcie_rc_data = {
	.mode = PCIE_MODE_RC,
	.quirk_detect_quiet_flag = true,
	.max_lanes = 2,
};

/* AM64: RC mode, detect-quiet quirk, single lane */
static const struct pcie_cdns_ti_data am64_pcie_rc_data = {
	.mode = PCIE_MODE_RC,
	.quirk_detect_quiet_flag = true,
	.max_lanes = 1,
};

static const struct udevice_id pcie_cdns_ti_ids[] = {
	{
		.compatible = "ti,j7200-pcie-host",
		.data = (ulong)&j7200_pcie_rc_data,
	},
	{
		.compatible = "ti,am64-pcie-host",
		.data = (ulong)&am64_pcie_rc_data,
	},
	{},
};

U_BOOT_DRIVER(pcie_cdns_ti) = {
	.name			= "pcie_cdns_ti",
	.id			= UCLASS_PCI,
	.of_match		= pcie_cdns_ti_ids,
	.ops			= &pcie_cdns_ti_ops,
	.of_to_plat		= pcie_cdns_ti_of_to_plat,
	.probe			= pcie_cdns_ti_probe,
	.priv_auto		= sizeof(struct pcie_cdns_ti),
};