// SPDX-License-Identifier: GPL-2.0+ OR X11
/*
 * Copyright 2018-2020 NXP
 *
 * PCIe Gen4 driver for NXP Layerscape SoCs
 * Author: Hou Zhiqiang <Minder.Hou@gmail.com>
 */

#include <common.h>
#include <log.h>
#include <asm/arch/fsl_serdes.h>
#include <pci.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <errno.h>
#include <malloc.h>
#include <dm.h>
#include <linux/sizes.h>

#include "pcie_layerscape_gen4.h"

DECLARE_GLOBAL_DATA_PTR;

LIST_HEAD(ls_pcie_g4_list);

static u64 bar_size[4] = {
	PCIE_BAR0_SIZE,
	PCIE_BAR1_SIZE,
	PCIE_BAR2_SIZE,
	PCIE_BAR4_SIZE
};

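/*
 * The LTSSM (Link Training and Status State Machine) state is read from
 * the per-controller PF control registers; the link is only treated as
 * up once the state machine has reached L0, the normal fully-operational
 * state.
 */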
static int ls_pcie_g4_ltssm(struct ls_pcie_g4 *pcie)
{
	u32 state;

	state = pf_ctrl_readl(pcie, PCIE_LTSSM_STA) & LTSSM_STATE_MASK;

	return state;
}

static int ls_pcie_g4_link_up(struct ls_pcie_g4 *pcie)
{
	int ltssm;

	ltssm = ls_pcie_g4_ltssm(pcie);
	if (ltssm != LTSSM_PCIE_L0)
		return 0;

	return 1;
}

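/*
 * Setting CFG_READY likely signals that endpoint initialization is
 * complete, allowing the controller to answer inbound configuration
 * requests instead of returning retry status.
 */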
static void ls_pcie_g4_ep_enable_cfg(struct ls_pcie_g4 *pcie)
{
	ccsr_writel(pcie, GPEX_CFG_READY, PCIE_CONFIG_READY);
}

static void ls_pcie_g4_cfg_set_target(struct ls_pcie_g4 *pcie, u32 target)
{
	ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_L(0), target);
	ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_H(0), 0);
}

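/*
 * Program an outbound (AXI -> PCIe) address translation window: CPU
 * accesses to a 'size'-aligned region at 'phys' are forwarded to bus
 * address 'bus_addr' as transactions of the given type (CFG, IO or
 * MEM).  The hardware encodes the window size as the mask ~(size - 1);
 * e.g. a 64KB window gives 0xffff0000, whose low bits are merged into
 * the control register and whose upper 32 bits go into the extended
 * size register.
 */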
static int ls_pcie_g4_outbound_win_set(struct ls_pcie_g4 *pcie, int idx,
				       int type, u64 phys, u64 bus_addr,
				       pci_size_t size)
{
	u32 val;
	u32 size_h, size_l;

	if (idx >= PAB_WINS_NUM)
		return -EINVAL;

	size_h = upper_32_bits(~(size - 1));
	size_l = lower_32_bits(~(size - 1));

	val = ccsr_readl(pcie, PAB_AXI_AMAP_CTRL(idx));
	val &= ~((AXI_AMAP_CTRL_TYPE_MASK << AXI_AMAP_CTRL_TYPE_SHIFT) |
		 (AXI_AMAP_CTRL_SIZE_MASK << AXI_AMAP_CTRL_SIZE_SHIFT) |
		 AXI_AMAP_CTRL_EN);
	val |= ((type & AXI_AMAP_CTRL_TYPE_MASK) << AXI_AMAP_CTRL_TYPE_SHIFT) |
	       ((size_l >> AXI_AMAP_CTRL_SIZE_SHIFT) <<
		AXI_AMAP_CTRL_SIZE_SHIFT) | AXI_AMAP_CTRL_EN;

	ccsr_writel(pcie, PAB_AXI_AMAP_CTRL(idx), val);

	ccsr_writel(pcie, PAB_AXI_AMAP_AXI_WIN(idx), lower_32_bits(phys));
	ccsr_writel(pcie, PAB_EXT_AXI_AMAP_AXI_WIN(idx), upper_32_bits(phys));
	ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_L(idx), lower_32_bits(bus_addr));
	ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_H(idx), upper_32_bits(bus_addr));
	ccsr_writel(pcie, PAB_EXT_AXI_AMAP_SIZE(idx), size_h);

	return 0;
}

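/*
 * Program an inbound (PCIe -> AXI) translation window so that inbound
 * transactions hitting 'bus_addr' are routed to the local address
 * 'phys'.  In RC mode a single large (1TB) window starting at 0 is
 * installed; see ls_pcie_g4_setup_wins() below.
 */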
static int ls_pcie_g4_rc_inbound_win_set(struct ls_pcie_g4 *pcie, int idx,
					 int type, u64 phys, u64 bus_addr,
					 pci_size_t size)
{
	u32 val;
	pci_size_t win_size = ~(size - 1);

	val = ccsr_readl(pcie, PAB_PEX_AMAP_CTRL(idx));

	val &= ~(PEX_AMAP_CTRL_TYPE_MASK << PEX_AMAP_CTRL_TYPE_SHIFT);
	val &= ~(PEX_AMAP_CTRL_EN_MASK << PEX_AMAP_CTRL_EN_SHIFT);
	val = (val | (type << PEX_AMAP_CTRL_TYPE_SHIFT));
	val = (val | (1 << PEX_AMAP_CTRL_EN_SHIFT));

	ccsr_writel(pcie, PAB_PEX_AMAP_CTRL(idx),
		    val | lower_32_bits(win_size));

	ccsr_writel(pcie, PAB_EXT_PEX_AMAP_SIZE(idx), upper_32_bits(win_size));
	ccsr_writel(pcie, PAB_PEX_AMAP_AXI_WIN(idx), lower_32_bits(phys));
	ccsr_writel(pcie, PAB_EXT_PEX_AMAP_AXI_WIN(idx), upper_32_bits(phys));
	ccsr_writel(pcie, PAB_PEX_AMAP_PEX_WIN_L(idx), lower_32_bits(bus_addr));
	ccsr_writel(pcie, PAB_PEX_AMAP_PEX_WIN_H(idx), upper_32_bits(bus_addr));

	return 0;
}

static void ls_pcie_g4_dump_wins(struct ls_pcie_g4 *pcie, int wins)
{
	int i;

	for (i = 0; i < wins; i++) {
		debug("APIO Win%d:\n", i);
		debug("\tLOWER PHYS: 0x%08x\n",
		      ccsr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(i)));
		debug("\tUPPER PHYS: 0x%08x\n",
		      ccsr_readl(pcie, PAB_EXT_AXI_AMAP_AXI_WIN(i)));
		debug("\tLOWER BUS: 0x%08x\n",
		      ccsr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_L(i)));
		debug("\tUPPER BUS: 0x%08x\n",
		      ccsr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(i)));
		debug("\tSIZE: 0x%08x\n",
		      ccsr_readl(pcie, PAB_AXI_AMAP_CTRL(i)) &
		      (AXI_AMAP_CTRL_SIZE_MASK << AXI_AMAP_CTRL_SIZE_SHIFT));
		debug("\tEXT_SIZE: 0x%08x\n",
		      ccsr_readl(pcie, PAB_EXT_AXI_AMAP_SIZE(i)));
		debug("\tPARAM: 0x%08x\n",
		      ccsr_readl(pcie, PAB_AXI_AMAP_PCI_HDR_PARAM(i)));
		debug("\tCTRL: 0x%08x\n",
		      ccsr_readl(pcie, PAB_AXI_AMAP_CTRL(i)));
	}
}

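/*
 * Set up the translation windows for RC mode: one inbound window
 * covering all of memory, outbound window 0 for configuration
 * accesses, and one outbound window each for the IO, memory and
 * prefetchable memory regions the PCI uclass has assigned to this
 * controller.
 */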
static void ls_pcie_g4_setup_wins(struct ls_pcie_g4 *pcie)
{
	struct pci_region *io, *mem, *pref;
	int idx = 1;

	/* INBOUND WIN */
	ls_pcie_g4_rc_inbound_win_set(pcie, 0, IB_TYPE_MEM_F, 0, 0, SIZE_1T);

	/* OUTBOUND WIN 0: CFG */
	ls_pcie_g4_outbound_win_set(pcie, 0, PAB_AXI_TYPE_CFG,
				    pcie->cfg_res.start, 0,
				    fdt_resource_size(&pcie->cfg_res));

	pci_get_regions(pcie->bus, &io, &mem, &pref);

	if (io)
		/* OUTBOUND WIN: IO */
		ls_pcie_g4_outbound_win_set(pcie, idx++, PAB_AXI_TYPE_IO,
					    io->phys_start, io->bus_start,
					    io->size);

	if (mem)
		/* OUTBOUND WIN: MEM */
		ls_pcie_g4_outbound_win_set(pcie, idx++, PAB_AXI_TYPE_MEM,
					    mem->phys_start, mem->bus_start,
					    mem->size);

	if (pref)
		/* OUTBOUND WIN: prefetchable MEM */
		ls_pcie_g4_outbound_win_set(pcie, idx++, PAB_AXI_TYPE_MEM,
					    pref->phys_start, pref->bus_start,
					    pref->size);

	ls_pcie_g4_dump_wins(pcie, idx);
}

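/*
 * Note: the checks below reject config accesses that cannot reach a
 * device: everything in endpoint mode (an EP has no downstream bus),
 * bus numbers below this controller's root bus, buses behind the root
 * while the link is down, and any device other than 0 on the root bus
 * and on the bus immediately behind it (a PCIe link is point-to-point,
 * so only one device can sit there).
 */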
/* Return 0 if the address is valid, -errno if not valid */
static int ls_pcie_g4_addr_valid(struct ls_pcie_g4 *pcie, pci_dev_t bdf)
{
	struct udevice *bus = pcie->bus;

	if (pcie->mode == PCI_HEADER_TYPE_NORMAL)
		return -ENODEV;

	if (!pcie->enabled)
		return -ENXIO;

	if (PCI_BUS(bdf) < dev_seq(bus))
		return -EINVAL;

	if ((PCI_BUS(bdf) > dev_seq(bus)) && (!ls_pcie_g4_link_up(pcie)))
		return -EINVAL;

	if (PCI_BUS(bdf) <= (dev_seq(bus) + 1) && (PCI_DEV(bdf) > 0))
		return -EINVAL;

	return 0;
}

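/*
 * Return a CPU-accessible address for the given config-space offset.
 * Accesses to the local (root) bus go to the controller's own CCSR
 * config registers, which are paged: offsets beyond the indirect-access
 * boundary require selecting the right page first.  Accesses to
 * downstream buses go through outbound CFG window 0, retargeted to the
 * requested bus/device/function.
 */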
void *ls_pcie_g4_conf_address(struct ls_pcie_g4 *pcie, pci_dev_t bdf,
			      int offset)
{
	struct udevice *bus = pcie->bus;
	u32 target;

	if (PCI_BUS(bdf) == dev_seq(bus)) {
		if (offset < INDIRECT_ADDR_BNDRY) {
			ccsr_set_page(pcie, 0);
			return pcie->ccsr + offset;
		}

		ccsr_set_page(pcie, OFFSET_TO_PAGE_IDX(offset));
		return pcie->ccsr + OFFSET_TO_PAGE_ADDR(offset);
	}

	target = PAB_TARGET_BUS(PCI_BUS(bdf) - dev_seq(bus)) |
		 PAB_TARGET_DEV(PCI_DEV(bdf)) |
		 PAB_TARGET_FUNC(PCI_FUNC(bdf));

	ls_pcie_g4_cfg_set_target(pcie, target);

	return pcie->cfg + offset;
}

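/*
 * Unreachable devices must read as all-ones so that enumeration sees
 * an empty slot, hence the pci_get_ff() fallback rather than an error
 * return.
 */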
static int ls_pcie_g4_read_config(const struct udevice *bus, pci_dev_t bdf,
				  uint offset, ulong *valuep,
				  enum pci_size_t size)
{
	struct ls_pcie_g4 *pcie = dev_get_priv(bus);
	void *address;
	int ret = 0;

	if (ls_pcie_g4_addr_valid(pcie, bdf)) {
		*valuep = pci_get_ff(size);
		return 0;
	}

	address = ls_pcie_g4_conf_address(pcie, bdf, offset);

	switch (size) {
	case PCI_SIZE_8:
		*valuep = readb(address);
		break;
	case PCI_SIZE_16:
		*valuep = readw(address);
		break;
	case PCI_SIZE_32:
		*valuep = readl(address);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ls_pcie_g4_write_config(struct udevice *bus, pci_dev_t bdf,
				   uint offset, ulong value,
				   enum pci_size_t size)
{
	struct ls_pcie_g4 *pcie = dev_get_priv(bus);
	void *address;

	if (ls_pcie_g4_addr_valid(pcie, bdf))
		return 0;

	address = ls_pcie_g4_conf_address(pcie, bdf, offset);

	switch (size) {
	case PCI_SIZE_8:
		writeb(value, address);
		return 0;
	case PCI_SIZE_16:
		writew(value, address);
		return 0;
	case PCI_SIZE_32:
		writel(value, address);
		return 0;
	default:
		return -EINVAL;
	}
}

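/*
 * Root-complex bring-up: advertise the PCI-to-PCI bridge class code
 * (the reset value is apparently not a bridge class, which would make
 * enumeration skip the port), enable the AXI PIO engine together with
 * its memory/IO/CFG windows, and program the translation windows.
 */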
static void ls_pcie_g4_setup_ctrl(struct ls_pcie_g4 *pcie)
{
	u32 val;

	/* Fix class code */
	val = ccsr_readl(pcie, GPEX_CLASSCODE);
	val &= ~(GPEX_CLASSCODE_MASK << GPEX_CLASSCODE_SHIFT);
	val |= PCI_CLASS_BRIDGE_PCI << GPEX_CLASSCODE_SHIFT;
	ccsr_writel(pcie, GPEX_CLASSCODE, val);

	/* Enable APIO and Memory/IO/CFG Wins */
	val = ccsr_readl(pcie, PAB_AXI_PIO_CTRL(0));
	val |= APIO_EN | MEM_WIN_EN | IO_WIN_EN | CFG_WIN_EN;
	ccsr_writel(pcie, PAB_AXI_PIO_CTRL(0), val);

	ls_pcie_g4_setup_wins(pcie);

	pcie->stream_id_cur = 0;
}

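/*
 * Back a physical function's BAR with local memory at 'phys'.  BAR1 is
 * reserved for MSI-X and needs no address translation, so it is only
 * enabled.
 */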
static void ls_pcie_g4_ep_inbound_win_set(struct ls_pcie_g4 *pcie, int pf,
					  int bar, u64 phys)
{
	u32 val;

	/* PF BAR1 is for MSI-X and only needs to be enabled */
	if (bar == 1) {
		ccsr_writel(pcie, PAB_PEX_BAR_AMAP(pf, bar), BAR_AMAP_EN);
		return;
	}

	val = upper_32_bits(phys);
	ccsr_writel(pcie, PAB_EXT_PEX_BAR_AMAP(pf, bar), val);
	val = lower_32_bits(phys) | BAR_AMAP_EN;
	ccsr_writel(pcie, PAB_PEX_BAR_AMAP(pf, bar), val);
}

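/*
 * Carve a per-PF slice out of the endpoint memory region: each PF's
 * BARs are backed by PCIE_BAR_SIZE inbound windows starting at
 * CONFIG_SYS_PCI_EP_MEMORY_BASE + 4 * PCIE_BAR_SIZE * pf, and one
 * outbound memory window per PF is tagged with the function number
 * through the PCI header parameter register.
 */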
static void ls_pcie_g4_ep_setup_wins(struct ls_pcie_g4 *pcie, int pf)
{
	u64 phys;
	int bar;
	u32 val;

	if ((!pcie->sriov_support && pf > LS_G4_PF0) || pf > LS_G4_PF1)
		return;

	phys = CONFIG_SYS_PCI_EP_MEMORY_BASE + PCIE_BAR_SIZE * 4 * pf;
	for (bar = 0; bar < PF_BAR_NUM; bar++) {
		ls_pcie_g4_ep_inbound_win_set(pcie, pf, bar, phys);
		phys += PCIE_BAR_SIZE;
	}

	/* OUTBOUND: map MEM */
	ls_pcie_g4_outbound_win_set(pcie, pf, PAB_AXI_TYPE_MEM,
				    pcie->cfg_res.start +
				    CONFIG_SYS_PCI_MEMORY_SIZE * pf, 0x0,
				    CONFIG_SYS_PCI_MEMORY_SIZE);

	val = ccsr_readl(pcie, PAB_AXI_AMAP_PCI_HDR_PARAM(pf));
	val &= ~FUNC_NUM_PCIE_MASK;
	val |= pf;
	ccsr_writel(pcie, PAB_AXI_AMAP_PCI_HDR_PARAM(pf), val);
}

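/*
 * BAR enable and sizing go through an indirection: the BAR's global
 * position (computed from function number, BAR index and whether it is
 * a VF BAR) selects a bit in GPEX_BAR_ENABLE, and the size is written
 * as a ~(size - 1) mask through the GPEX_BAR_SELECT/SIZE registers.
 */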
static void ls_pcie_g4_ep_enable_bar(struct ls_pcie_g4 *pcie, int pf,
				     int bar, bool vf_bar, bool enable)
{
	u32 val;
	u32 bar_pos = BAR_POS(bar, pf, vf_bar);

	val = ccsr_readl(pcie, GPEX_BAR_ENABLE);
	if (enable)
		val |= 1 << bar_pos;
	else
		val &= ~(1 << bar_pos);
	ccsr_writel(pcie, GPEX_BAR_ENABLE, val);
}

static void ls_pcie_g4_ep_set_bar_size(struct ls_pcie_g4 *pcie, int pf,
				       int bar, bool vf_bar, u64 size)
{
	u32 bar_pos = BAR_POS(bar, pf, vf_bar);
	u32 mask_l = lower_32_bits(~(size - 1));
	u32 mask_h = upper_32_bits(~(size - 1));

	ccsr_writel(pcie, GPEX_BAR_SELECT, bar_pos);
	ccsr_writel(pcie, GPEX_BAR_SIZE_LDW, mask_l);
	ccsr_writel(pcie, GPEX_BAR_SIZE_UDW, mask_h);
}

static void ls_pcie_g4_ep_setup_bar(struct ls_pcie_g4 *pcie, int pf,
				    int bar, bool vf_bar, u64 size)
{
	bool en = size ? true : false;

	ls_pcie_g4_ep_enable_bar(pcie, pf, bar, vf_bar, en);
	ls_pcie_g4_ep_set_bar_size(pcie, pf, bar, vf_bar, size);
}

static void ls_pcie_g4_ep_setup_bars(struct ls_pcie_g4 *pcie, int pf)
{
	int bar;

	/* Setup PF BARs */
	for (bar = 0; bar < PF_BAR_NUM; bar++)
		ls_pcie_g4_ep_setup_bar(pcie, pf, bar, false, bar_size[bar]);

	if (!pcie->sriov_support)
		return;

	/* Setup VF BARs */
	for (bar = 0; bar < VF_BAR_NUM; bar++)
		ls_pcie_g4_ep_setup_bar(pcie, pf, bar, true, bar_size[bar]);
}

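/*
 * Program the SR-IOV capability for one PF: both TotalVFs and
 * InitialVFs are set to PCIE_VF_NUM, and the VF offset/stride word is
 * adjusted, apparently so that each PF's VFs occupy a distinct
 * routing-ID range.
 */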
static void ls_pcie_g4_set_sriov(struct ls_pcie_g4 *pcie, int pf)
{
	unsigned int val;

	val = ccsr_readl(pcie, GPEX_SRIOV_INIT_VFS_TOTAL_VF(pf));
	val &= ~(TTL_VF_MASK << TTL_VF_SHIFT);
	val |= PCIE_VF_NUM << TTL_VF_SHIFT;
	val &= ~(INI_VF_MASK << INI_VF_SHIFT);
	val |= PCIE_VF_NUM << INI_VF_SHIFT;
	ccsr_writel(pcie, GPEX_SRIOV_INIT_VFS_TOTAL_VF(pf), val);

	val = ccsr_readl(pcie, PCIE_SRIOV_VF_OFFSET_STRIDE);
	val += PCIE_VF_NUM * pf - pf;
	ccsr_writel(pcie, GPEX_SRIOV_VF_OFFSET_STRIDE(pf), val);
}

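/*
 * Endpoint bring-up: enable the AXI PIO engine and memory window,
 * detect SR-IOV support from the extended capability list, then set up
 * BARs, windows and (if supported) SR-IOV for each physical function
 * before declaring the configuration ready.
 */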
static void ls_pcie_g4_setup_ep(struct ls_pcie_g4 *pcie)
{
	u32 pf, sriov;
	u32 val;
	int i;

	/* Enable APIO and Memory Win */
	val = ccsr_readl(pcie, PAB_AXI_PIO_CTRL(0));
	val |= APIO_EN | MEM_WIN_EN;
	ccsr_writel(pcie, PAB_AXI_PIO_CTRL(0), val);

	sriov = ccsr_readl(pcie, PCIE_SRIOV_CAPABILITY);
	if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV)
		pcie->sriov_support = 1;

	pf = pcie->sriov_support ? PCIE_PF_NUM : 1;

	for (i = 0; i < pf; i++) {
		ls_pcie_g4_ep_setup_bars(pcie, i);
		ls_pcie_g4_ep_setup_wins(pcie, i);
		if (pcie->sriov_support)
			ls_pcie_g4_set_sriov(pcie, i);
	}

	ls_pcie_g4_ep_enable_cfg(pcie);
	ls_pcie_g4_dump_wins(pcie, pf);
}

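/*
 * Probe: map the "ccsr", "config", "lut" and "pf_ctrl" register
 * regions from the device tree, derive the controller index from the
 * CCSR base address, check whether the SerDes lanes are configured for
 * PCIe, and then initialize the controller in whichever mode (RC or
 * endpoint) the header type reports.
 */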
static int ls_pcie_g4_probe(struct udevice *dev)
{
	struct ls_pcie_g4 *pcie = dev_get_priv(dev);
	const void *fdt = gd->fdt_blob;
	int node = dev_of_offset(dev);
	u32 link_ctrl_sta;
	u32 val;
	int ret;
	fdt_size_t cfg_size;

	pcie->bus = dev;

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "ccsr", &pcie->ccsr_res);
	if (ret) {
		printf("ls-pcie-g4: resource \"ccsr\" not found\n");
		return ret;
	}

	pcie->idx = (pcie->ccsr_res.start - PCIE_SYS_BASE_ADDR) /
		    PCIE_CCSR_SIZE;

	list_add(&pcie->list, &ls_pcie_g4_list);

	pcie->enabled = is_serdes_configured(PCIE_SRDS_PRTCL(pcie->idx));
	if (!pcie->enabled) {
		printf("PCIe%d: %s disabled\n", PCIE_SRDS_PRTCL(pcie->idx),
		       dev->name);
		return 0;
	}

	pcie->ccsr = map_physmem(pcie->ccsr_res.start,
				 fdt_resource_size(&pcie->ccsr_res),
				 MAP_NOCACHE);

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "config", &pcie->cfg_res);
	if (ret) {
		printf("%s: resource \"config\" not found\n", dev->name);
		return ret;
	}

	cfg_size = fdt_resource_size(&pcie->cfg_res);
	if (cfg_size < SZ_4K) {
		printf("PCIe%d: %s Invalid size(0x%llx) for resource \"config\", expected minimum 0x%x\n",
		       PCIE_SRDS_PRTCL(pcie->idx), dev->name, cfg_size, SZ_4K);
		return 0;
	}

	pcie->cfg = map_physmem(pcie->cfg_res.start,
				fdt_resource_size(&pcie->cfg_res),
				MAP_NOCACHE);

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "lut", &pcie->lut_res);
	if (ret) {
		printf("ls-pcie-g4: resource \"lut\" not found\n");
		return ret;
	}

	pcie->lut = map_physmem(pcie->lut_res.start,
				fdt_resource_size(&pcie->lut_res),
				MAP_NOCACHE);

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "pf_ctrl", &pcie->pf_ctrl_res);
	if (ret) {
		printf("ls-pcie-g4: resource \"pf_ctrl\" not found\n");
		return ret;
	}

	pcie->pf_ctrl = map_physmem(pcie->pf_ctrl_res.start,
				    fdt_resource_size(&pcie->pf_ctrl_res),
				    MAP_NOCACHE);

	pcie->big_endian = fdtdec_get_bool(fdt, node, "big-endian");

	debug("%s ccsr:0x%lx, cfg:0x%lx, big-endian:%d\n",
	      dev->name, (unsigned long)pcie->ccsr, (unsigned long)pcie->cfg,
	      pcie->big_endian);

	pcie->mode = readb(pcie->ccsr + PCI_HEADER_TYPE) & 0x7f;

	if (pcie->mode == PCI_HEADER_TYPE_NORMAL) {
		printf("PCIe%u: %s %s", PCIE_SRDS_PRTCL(pcie->idx), dev->name,
		       "Endpoint");
		ls_pcie_g4_setup_ep(pcie);
	} else {
		printf("PCIe%u: %s %s", PCIE_SRDS_PRTCL(pcie->idx), dev->name,
		       "Root Complex");
		ls_pcie_g4_setup_ctrl(pcie);
	}

	/* Enable Amba & PEX PIO */
	val = ccsr_readl(pcie, PAB_CTRL);
	val |= PAB_CTRL_APIO_EN | PAB_CTRL_PPIO_EN;
	ccsr_writel(pcie, PAB_CTRL, val);

	val = ccsr_readl(pcie, PAB_PEX_PIO_CTRL(0));
	val |= PPIO_EN;
	ccsr_writel(pcie, PAB_PEX_PIO_CTRL(0), val);

	if (!ls_pcie_g4_link_up(pcie)) {
		/* Let the user know there's no PCIe link */
		printf(": no link\n");
		return 0;
	}

	/* Print the negotiated PCIe link width and speed */
	link_ctrl_sta = ccsr_readl(pcie, PCIE_LINK_CTRL_STA);
	printf(": x%d gen%d\n",
	       (link_ctrl_sta >> PCIE_LINK_WIDTH_SHIFT & PCIE_LINK_WIDTH_MASK),
	       (link_ctrl_sta >> PCIE_LINK_SPEED_SHIFT) & PCIE_LINK_SPEED_MASK);

	return 0;
}

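/*
 * Only config-space accessors are needed here; the DM PCI uclass
 * drives enumeration and resource assignment through them.
 */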
static const struct dm_pci_ops ls_pcie_g4_ops = {
	.read_config = ls_pcie_g4_read_config,
	.write_config = ls_pcie_g4_write_config,
};

static const struct udevice_id ls_pcie_g4_ids[] = {
	{ .compatible = "fsl,lx2160a-pcie" },
	{ }
};

U_BOOT_DRIVER(pcie_layerscape_gen4) = {
	.name = "pcie_layerscape_gen4",
	.id = UCLASS_PCI,
	.of_match = ls_pcie_g4_ids,
	.ops = &ls_pcie_g4_ops,
	.probe = ls_pcie_g4_probe,
	.priv_auto = sizeof(struct ls_pcie_g4),
};