// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
 * Copyright (C) 2025 Texas Instruments Incorporated - https://www.ti.com
 *
 * PCIe Endpoint controller driver for TI's K3 SoCs with Cadence PCIe controller
 *
 * Ported from the Linux driver - drivers/pci/controller/cadence/pci-j721e.c
 *
 * Author: Hrushikesh Salunke <h-salunke@ti.com>
 *
 */

#include <clk.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <generic-phy.h>
#include <linux/log2.h>
#include <linux/sizes.h>
#include <power-domain.h>
#include <regmap.h>
#include <syscon.h>
#include <pcie-cadence.h>
#include <pci_ep.h>

#define PCIE_USER_CMD_STATUS_REG_OFFSET	0x4
#define LINK_TRAINING_ENABLE		BIT(0)

#define PCIE_MODE_SEL_MASK		BIT(7)
#define PCIE_GEN_SEL_MASK		GENMASK(1, 0)
#define PCIE_LINK_WIDTH_MASK		GENMASK(9, 8)

struct pcie_cdns_ti_ep_data {
	unsigned int quirk_retrain_flag:1;
	unsigned int quirk_detect_quiet_flag:1;
	unsigned int quirk_disable_flr:1;
	unsigned int byte_access_allowed:1;
	unsigned int max_lanes;
};

struct pcie_cdns_ti_ep {
	struct udevice *dev;
	void __iomem *intd_cfg_base;
	void __iomem *user_cfg_base;
	void __iomem *reg_base;
	void __iomem *mem_base;
	fdt_size_t cfg_size;
	struct regmap *syscon_base;
	u32 max_link_speed;
	u32 num_lanes;
	u32 pcie_ctrl_offset;
	unsigned int quirk_retrain_flag:1;
	unsigned int quirk_detect_quiet_flag:1;
	unsigned int quirk_disable_flr:1;
	unsigned int byte_access_allowed:1;
};

static inline u32 pcie_cdns_ti_ep_user_readl(struct pcie_cdns_ti_ep *pcie, u32 offset)
{
	return readl(pcie->user_cfg_base + offset);
}

static inline void pcie_cdns_ti_ep_user_writel(struct pcie_cdns_ti_ep *pcie, u32 offset,
					       u32 val)
{
	writel(val, pcie->user_cfg_base + offset);
}

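/*
 * Kick off link training by setting the LINK_TRAINING_ENABLE bit in the
 * CMD_STATUS register of the user configuration space.
 */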
static void pcie_cdns_ti_start_link(struct pcie_cdns_ti_ep *pcie)
{
	u32 reg;

	reg = pcie_cdns_ti_ep_user_readl(pcie, PCIE_USER_CMD_STATUS_REG_OFFSET);
	reg |= LINK_TRAINING_ENABLE;
	pcie_cdns_ti_ep_user_writel(pcie, PCIE_USER_CMD_STATUS_REG_OFFSET, reg);
}

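/* Reset the controller by cycling its power domain off and back on. */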
static int pcie_cdns_reset(struct udevice *dev, struct power_domain *pci_pwrdmn)
{
	int ret;

	ret = power_domain_off(pci_pwrdmn);
	if (ret) {
		dev_err(dev, "failed to power off: %d\n", ret);
		return ret;
	}

	ret = power_domain_on(pci_pwrdmn);
	if (ret) {
		dev_err(dev, "failed to power on: %d\n", ret);
		return ret;
	}

	return 0;
}

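/*
 * Bring up the SERDES through the generic PHY framework when the Cadence
 * Torrent PHY driver is enabled, otherwise assume an earlier boot stage
 * has already configured it.
 */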
static int pcie_cdns_config_serdes(struct udevice *dev)
{
	if (CONFIG_IS_ENABLED(PHY_CADENCE_TORRENT)) {
		struct phy serdes;
		int ret;

		ret = generic_phy_get_by_name(dev, "pcie-phy", &serdes);
		if (ret != 0 && ret != -EBUSY) {
			dev_err(dev, "unable to get serdes\n");
			return ret;
		}
		generic_phy_reset(&serdes);
		generic_phy_init(&serdes);
		generic_phy_power_on(&serdes);
	} else {
		dev_info(dev, "Proceeding with the assumption that the SERDES is already configured\n");
	}

	return 0;
}

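/*
 * Program the mode, link speed and link width fields of the PCIe control
 * register accessed through the syscon regmap.
 */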
static int pcie_cdns_ti_ctrl_init(struct pcie_cdns_ti_ep *pcie)
{
	struct regmap *syscon = pcie->syscon_base;
	u32 val = 0;

	/* Set mode of operation: leave the mode select bit clear for Endpoint mode */
	regmap_update_bits(syscon, pcie->pcie_ctrl_offset, PCIE_MODE_SEL_MASK,
			   val);

	/* Set link speed */
	regmap_update_bits(syscon, pcie->pcie_ctrl_offset, PCIE_GEN_SEL_MASK,
			   pcie->max_link_speed - 1);

	/* Set link width */
	regmap_update_bits(syscon, pcie->pcie_ctrl_offset, PCIE_LINK_WIDTH_MASK,
			   (pcie->num_lanes - 1) << 8);

	return 0;
}

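/*
 * Program the standard configuration space header fields of endpoint
 * function 'fn' from the supplied pci_ep_header.
 */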
static int pcie_cdns_ti_write_header(struct udevice *dev, uint fn,
				     struct pci_ep_header *hdr)
{
	struct pcie_cdns_ti_ep *pcie_ep = dev_get_priv(dev);
	struct cdns_pcie pcie;

	pcie.reg_base = pcie_ep->reg_base;

	cdns_pcie_ep_fn_writew(&pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
	cdns_pcie_ep_fn_writeb(&pcie, fn, PCI_REVISION_ID, hdr->revid);
	cdns_pcie_ep_fn_writeb(&pcie, fn, PCI_CLASS_PROG,
			       hdr->progif_code);
	cdns_pcie_ep_fn_writew(&pcie, fn, PCI_CLASS_DEVICE,
			       hdr->subclass_code |
			       hdr->baseclass_code << 8);
	cdns_pcie_ep_fn_writeb(&pcie, fn, PCI_CACHE_LINE_SIZE,
			       hdr->cache_line_size);
	cdns_pcie_ep_fn_writew(&pcie, fn, PCI_SUBSYSTEM_ID,
			       hdr->subsys_id);
	cdns_pcie_ep_fn_writeb(&pcie, fn, PCI_INTERRUPT_PIN,
			       hdr->interrupt_pin);

	/*
	 * Vendor ID can only be modified from function 0, all other functions
	 * use the same vendor ID as function 0.
	 */
	if (fn == 0) {
		/* Update the vendor IDs. */
		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

		cdns_pcie_writel(&pcie, CDNS_PCIE_LM_ID, id);
	}

	return 0;
}

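/*
 * Program the inbound address translation registers and the aperture and
 * control bits for the given BAR of endpoint function 'fn'.
 */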
static int pcie_cdns_ti_set_bar(struct udevice *dev, uint fn,
				struct pci_bar *ep_bar)
{
	struct pcie_cdns_ti_ep *pcie_ep = dev_get_priv(dev);
	struct cdns_pcie pcie;
	dma_addr_t bar_phys = ep_bar->phys_addr;
	enum pci_barno bar = ep_bar->barno;
	int flags = ep_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	pcie.reg_base = pcie_ep->reg_base;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, ep_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
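	/* e.g. for a 1 MiB BAR: sz = 2^20, so aperture = 20 - 7 = 13 */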

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = (sz > SZ_2G) ||
			!!(ep_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64);

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
			ep_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);
	cdns_pcie_writel(&pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(&pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

	/*
	 * The Cadence PCIe controller provides a register interface to
	 * configure the BARs of an Endpoint function. There are two BAR
	 * configuration registers per function: the first configures BAR_0
	 * through BAR_3 and the second configures the remaining BARs.
	 */
	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	cfg = cdns_pcie_readl(&pcie, reg);

	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
		CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
	cdns_pcie_writel(&pcie, reg, cfg);

	cfg = cdns_pcie_readl(&pcie, reg);

	return 0;
}

static int pcie_cdns_ti_start(struct udevice *dev)
{
	struct pcie_cdns_ti_ep *pcie = dev_get_priv(dev);

	pcie_cdns_ti_start_link(pcie);

	return 0;
}

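/*
 * Probe resets the controller through its power domain, brings up the
 * SERDES and programs the PCIe control register; link training is only
 * started later via the start() op.
 */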
static int pcie_cdns_ti_ep_probe(struct udevice *dev)
{
	struct pcie_cdns_ti_ep *pcie = dev_get_priv(dev);
	struct pcie_cdns_ti_ep_data *data;
	struct power_domain pci_pwrdmn;
	struct clk *clk;
	int ret;

	pcie->dev = dev;
	data = (struct pcie_cdns_ti_ep_data *)dev_get_driver_data(dev);
	if (!data)
		return -EINVAL;

	pcie->quirk_retrain_flag = data->quirk_retrain_flag;
	pcie->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
	pcie->quirk_disable_flr = data->quirk_disable_flr;

	if (pcie->num_lanes > data->max_lanes) {
		dev_warn(dev, "cannot support %d lanes, defaulting to %d\n",
			 pcie->num_lanes, data->max_lanes);
		pcie->num_lanes = data->max_lanes;
	}

	ret = power_domain_get_by_index(dev, &pci_pwrdmn, 0);
	if (ret) {
		dev_err(dev, "failed to get power domain: %d\n", ret);
		return ret;
	}

	/*
	 * Reset the PCIe controller so that the newly configured BAR
	 * values take effect.
	 */
	ret = pcie_cdns_reset(dev, &pci_pwrdmn);
	if (ret) {
		dev_err(dev, "failed to reset controller: %d\n", ret);
		return ret;
	}

	clk = devm_clk_get(dev, "fck");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(dev, "failed to get functional clock: %d\n", ret);
		return ret;
	}

	ret = pcie_cdns_config_serdes(dev);
	if (ret) {
		dev_err(dev, "failed to configure serdes: %d\n", ret);
		return ret;
	}

	ret = pcie_cdns_ti_ctrl_init(pcie);
	if (ret) {
		dev_err(dev, "failed to initialize controller: %d\n", ret);
		return ret;
	}

	return 0;
}

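/*
 * Parse the register regions, lane count, link speed and the
 * "ti,syscon-pcie-ctrl" phandle/offset from the device tree.
 */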
static int pcie_cdns_ti_ep_of_to_plat(struct udevice *dev)
{
	struct pcie_cdns_ti_ep *pcie = dev_get_priv(dev);
	struct regmap *syscon;
	u32 offset;
	int ret;

	pcie->intd_cfg_base = dev_remap_addr_name(dev, "intd_cfg");
	if (!pcie->intd_cfg_base)
		return -EINVAL;

	pcie->user_cfg_base = dev_remap_addr_name(dev, "user_cfg");
	if (!pcie->user_cfg_base)
		return -EINVAL;

	pcie->reg_base = dev_remap_addr_name(dev, "reg");
	if (!pcie->reg_base)
		return -EINVAL;

	pcie->mem_base = dev_remap_addr_name(dev, "mem");
	if (!pcie->mem_base)
		return -EINVAL;

	ret = dev_read_u32(dev, "num-lanes", &pcie->num_lanes);
	if (ret)
		return ret;

	ret = dev_read_u32(dev, "max-link-speed", &pcie->max_link_speed);
	if (ret)
		return ret;

	syscon = syscon_regmap_lookup_by_phandle(dev, "ti,syscon-pcie-ctrl");
	if (IS_ERR(syscon)) {
		if (PTR_ERR(syscon) == -ENODEV)
			return 0;
		return PTR_ERR(syscon);
	}

	ret = dev_read_u32_index(dev, "ti,syscon-pcie-ctrl", 1, &offset);
	if (ret)
		return ret;

	pcie->syscon_base = syscon;
	pcie->pcie_ctrl_offset = offset;

	return 0;
}

static const struct pci_ep_ops pcie_cdns_ti_ep_ops = {
	.write_header = pcie_cdns_ti_write_header,
	.set_bar = pcie_cdns_ti_set_bar,
	.start = pcie_cdns_ti_start,
};

static const struct pcie_cdns_ti_ep_data am64_pcie_ep_data = {
	.max_lanes = 1,
};

static const struct udevice_id pcie_cdns_ti_ep_ids[] = {
	{
		.compatible = "ti,am64-pcie-ep",
		.data = (ulong)&am64_pcie_ep_data,
	},
	{},
};

U_BOOT_DRIVER(pcie_cdns_ti_ep) = {
	.name = "pcie_cdns_ti_ep",
	.id = UCLASS_PCI_EP,
	.of_match = pcie_cdns_ti_ep_ids,
	.ops = &pcie_cdns_ti_ep_ops,
	.of_to_plat = pcie_cdns_ti_ep_of_to_plat,
	.probe = pcie_cdns_ti_ep_probe,
	.priv_auto = sizeof(struct pcie_cdns_ti_ep),
};