/*
 * Copyright 2017 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 * Layerscape PCIe driver
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/arch/fsl_serdes.h>
#include <pci.h>
#include <asm/io.h>
#include <errno.h>
#include <malloc.h>
#include <dm.h>
#if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \
	defined(CONFIG_ARM)
#include <asm/arch/clock.h>
#endif
#include "pcie_layerscape.h"

DECLARE_GLOBAL_DATA_PTR;

LIST_HEAD(ls_pcie_list);

static unsigned int dbi_readl(struct ls_pcie *pcie, unsigned int offset)
{
	return in_le32(pcie->dbi + offset);
}

static void dbi_writel(struct ls_pcie *pcie, unsigned int value,
		       unsigned int offset)
{
	out_le32(pcie->dbi + offset, value);
}

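/*
 * The control/LUT register block may be big- or little-endian, as
 * indicated by the "big-endian" device tree property.
 */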
static unsigned int ctrl_readl(struct ls_pcie *pcie, unsigned int offset)
{
	if (pcie->big_endian)
		return in_be32(pcie->ctrl + offset);
	else
		return in_le32(pcie->ctrl + offset);
}

static void ctrl_writel(struct ls_pcie *pcie, unsigned int value,
			unsigned int offset)
{
	if (pcie->big_endian)
		out_be32(pcie->ctrl + offset, value);
	else
		out_le32(pcie->ctrl + offset, value);
}

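/*
 * Read the current LTSSM (link training and status state machine) state.
 * LS1021A exposes it in the per-port LS1021_PEXMSCPORTSR register; the
 * other SoCs report it in the low bits of PCIE_PF_DBG.
 */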
static int ls_pcie_ltssm(struct ls_pcie *pcie)
{
	u32 state;
	uint svr;

	svr = get_svr();
	if (((svr >> SVR_VAR_PER_SHIFT) & SVR_LS102XA_MASK) == SVR_LS102XA) {
		state = ctrl_readl(pcie, LS1021_PEXMSCPORTSR(pcie->idx));
		state = (state >> LS1021_LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
	} else {
		state = ctrl_readl(pcie, PCIE_PF_DBG) & LTSSM_STATE_MASK;
	}

	return state;
}

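/* The link is up once the LTSSM has reached the L0 state or beyond */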
static int ls_pcie_link_up(struct ls_pcie *pcie)
{
	int ltssm;

	ltssm = ls_pcie_ltssm(pcie);
	if (ltssm < LTSSM_PCIE_L0)
		return 0;

	return 1;
}

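/*
 * Retarget the outbound config windows at the bus/device/function encoded
 * in busdev: window 0 generates Type 0 config cycles, window 1 Type 1.
 */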
static void ls_pcie_cfg0_set_busdev(struct ls_pcie *pcie, u32 busdev)
{
	dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
		   PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, busdev, PCIE_ATU_LOWER_TARGET);
}

static void ls_pcie_cfg1_set_busdev(struct ls_pcie *pcie, u32 busdev)
{
	dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
		   PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, busdev, PCIE_ATU_LOWER_TARGET);
}

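/*
 * Program one outbound iATU window: select it via the viewport register,
 * set the CPU-side base and limit, the bus-side target address and the
 * TLP type to generate, then enable the window.
 */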
static void ls_pcie_atu_outbound_set(struct ls_pcie *pcie, int idx, int type,
				     u64 phys, u64 bus_addr, pci_size_t size)
{
	dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | idx, PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, (u32)phys, PCIE_ATU_LOWER_BASE);
	dbi_writel(pcie, phys >> 32, PCIE_ATU_UPPER_BASE);
	dbi_writel(pcie, (u32)phys + size - 1, PCIE_ATU_LIMIT);
	dbi_writel(pcie, (u32)bus_addr, PCIE_ATU_LOWER_TARGET);
	dbi_writel(pcie, bus_addr >> 32, PCIE_ATU_UPPER_TARGET);
	dbi_writel(pcie, type, PCIE_ATU_CR1);
	dbi_writel(pcie, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}

/* Use bar match mode and MEM type as default */
static void ls_pcie_atu_inbound_set(struct ls_pcie *pcie, int idx,
				    int bar, u64 phys)
{
	dbi_writel(pcie, PCIE_ATU_REGION_INBOUND | idx, PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, (u32)phys, PCIE_ATU_LOWER_TARGET);
	dbi_writel(pcie, phys >> 32, PCIE_ATU_UPPER_TARGET);
	dbi_writel(pcie, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
	dbi_writel(pcie, PCIE_ATU_ENABLE | PCIE_ATU_BAR_MODE_ENABLE |
		   PCIE_ATU_BAR_NUM(bar), PCIE_ATU_CR2);
}

static void ls_pcie_dump_atu(struct ls_pcie *pcie)
{
	int i;

	for (i = 0; i < PCIE_ATU_REGION_NUM; i++) {
		dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | i,
			   PCIE_ATU_VIEWPORT);
		debug("iATU%d:\n", i);
		debug("\tLOWER PHYS 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_LOWER_BASE));
		debug("\tUPPER PHYS 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_UPPER_BASE));
		debug("\tLOWER BUS 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_LOWER_TARGET));
		debug("\tUPPER BUS 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_UPPER_TARGET));
		debug("\tLIMIT 0x%08x\n",
		      readl(pcie->dbi + PCIE_ATU_LIMIT));
		debug("\tCR1 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_CR1));
		debug("\tCR2 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_CR2));
	}
}

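/*
 * Set up the outbound windows for RC mode: windows 0 and 1 split the
 * "config" resource in half for Type 0 and Type 1 config cycles, and the
 * remaining windows translate the I/O, memory and prefetchable regions
 * from CPU addresses to PCI bus addresses.
 */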
static void ls_pcie_setup_atu(struct ls_pcie *pcie)
{
	struct pci_region *io, *mem, *pref;
	unsigned long long offset = 0;
	int idx = 0;
	uint svr;

	svr = get_svr();
	if (((svr >> SVR_VAR_PER_SHIFT) & SVR_LS102XA_MASK) == SVR_LS102XA) {
		offset = LS1021_PCIE_SPACE_OFFSET +
			 LS1021_PCIE_SPACE_SIZE * pcie->idx;
	}

	/* ATU 0 : OUTBOUND : CFG0 */
	ls_pcie_atu_outbound_set(pcie, PCIE_ATU_REGION_INDEX0,
				 PCIE_ATU_TYPE_CFG0,
				 pcie->cfg_res.start + offset,
				 0,
				 fdt_resource_size(&pcie->cfg_res) / 2);
	/* ATU 1 : OUTBOUND : CFG1 */
	ls_pcie_atu_outbound_set(pcie, PCIE_ATU_REGION_INDEX1,
				 PCIE_ATU_TYPE_CFG1,
				 pcie->cfg_res.start + offset +
				 fdt_resource_size(&pcie->cfg_res) / 2,
				 0,
				 fdt_resource_size(&pcie->cfg_res) / 2);

	pci_get_regions(pcie->bus, &io, &mem, &pref);
	idx = PCIE_ATU_REGION_INDEX1 + 1;

	/* Fix the pcie memory map for LS2088A series SoCs */
	svr = (svr >> SVR_VAR_PER_SHIFT) & 0xFFFFFE;
	if (svr == SVR_LS2088A || svr == SVR_LS2084A ||
	    svr == SVR_LS2048A || svr == SVR_LS2044A ||
	    svr == SVR_LS2081A || svr == SVR_LS2041A) {
		if (io)
			io->phys_start = (io->phys_start &
					 (PCIE_PHYS_SIZE - 1)) +
					 LS2088A_PCIE1_PHYS_ADDR +
					 LS2088A_PCIE_PHYS_SIZE * pcie->idx;
		if (mem)
			mem->phys_start = (mem->phys_start &
					  (PCIE_PHYS_SIZE - 1)) +
					  LS2088A_PCIE1_PHYS_ADDR +
					  LS2088A_PCIE_PHYS_SIZE * pcie->idx;
		if (pref)
			pref->phys_start = (pref->phys_start &
					   (PCIE_PHYS_SIZE - 1)) +
					   LS2088A_PCIE1_PHYS_ADDR +
					   LS2088A_PCIE_PHYS_SIZE * pcie->idx;
	}

	if (io)
		/* ATU : OUTBOUND : IO */
		ls_pcie_atu_outbound_set(pcie, idx++,
					 PCIE_ATU_TYPE_IO,
					 io->phys_start + offset,
					 io->bus_start,
					 io->size);

	if (mem)
		/* ATU : OUTBOUND : MEM */
		ls_pcie_atu_outbound_set(pcie, idx++,
					 PCIE_ATU_TYPE_MEM,
					 mem->phys_start + offset,
					 mem->bus_start,
					 mem->size);

	if (pref)
		/* ATU : OUTBOUND : pref */
		ls_pcie_atu_outbound_set(pcie, idx++,
					 PCIE_ATU_TYPE_MEM,
					 pref->phys_start + offset,
					 pref->bus_start,
					 pref->size);

	ls_pcie_dump_atu(pcie);
}

/* Return 0 if the address is valid, -errno if not valid */
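/*
 * The controller is a single root port, so only device 0 may appear on
 * the root bus and on the bus directly behind it, and buses further
 * downstream are reachable only once the link has trained.
 */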
static int ls_pcie_addr_valid(struct ls_pcie *pcie, pci_dev_t bdf)
{
	struct udevice *bus = pcie->bus;

	if (!pcie->enabled)
		return -ENXIO;

	if (PCI_BUS(bdf) < bus->seq)
		return -EINVAL;

	if ((PCI_BUS(bdf) > bus->seq) && (!ls_pcie_link_up(pcie)))
		return -EINVAL;

	if (PCI_BUS(bdf) <= (bus->seq + 1) && (PCI_DEV(bdf) > 0))
		return -EINVAL;

	return 0;
}

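/*
 * Translate a config access into an MMIO address: accesses to the root
 * bus hit the controller's own registers in DBI space, while downstream
 * buses go through the CFG0 window (bus immediately behind the port) or
 * the CFG1 window (deeper buses) after retargeting it at the requested
 * bus/device/function.
 */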
int ls_pcie_conf_address(struct udevice *bus, pci_dev_t bdf,
			 uint offset, void **paddress)
{
	struct ls_pcie *pcie = dev_get_priv(bus);
	u32 busdev;

	if (ls_pcie_addr_valid(pcie, bdf))
		return -EINVAL;

	if (PCI_BUS(bdf) == bus->seq) {
		*paddress = pcie->dbi + offset;
		return 0;
	}

	busdev = PCIE_ATU_BUS(PCI_BUS(bdf) - bus->seq) |
		 PCIE_ATU_DEV(PCI_DEV(bdf)) |
		 PCIE_ATU_FUNC(PCI_FUNC(bdf));

	if (PCI_BUS(bdf) == bus->seq + 1) {
		ls_pcie_cfg0_set_busdev(pcie, busdev);
		*paddress = pcie->cfg0 + offset;
	} else {
		ls_pcie_cfg1_set_busdev(pcie, busdev);
		*paddress = pcie->cfg1 + offset;
	}
	return 0;
}

static int ls_pcie_read_config(struct udevice *bus, pci_dev_t bdf,
			       uint offset, ulong *valuep,
			       enum pci_size_t size)
{
	return pci_generic_mmap_read_config(bus, ls_pcie_conf_address,
					    bdf, offset, valuep, size);
}

static int ls_pcie_write_config(struct udevice *bus, pci_dev_t bdf,
				uint offset, ulong value,
				enum pci_size_t size)
{
	return pci_generic_mmap_write_config(bus, ls_pcie_conf_address,
					     bdf, offset, value, size);
}

/* Clear multi-function bit */
static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
{
	writeb(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE);
}

/* Fix class value */
static void ls_pcie_fix_class(struct ls_pcie *pcie)
{
	writew(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
}

/* Drop MSG TLP except for Vendor MSG */
static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
{
	u32 val;

	val = dbi_readl(pcie, PCIE_STRFMR1);
	val &= 0xDFFFFFFF;
	dbi_writel(pcie, val, PCIE_STRFMR1);
}

/* Disable all bars in RC mode */
static void ls_pcie_disable_bars(struct ls_pcie *pcie)
{
	u32 sriov;

	sriov = in_le32(pcie->dbi + PCIE_SRIOV);

	/*
	 * TODO: For PCIe controller with SRIOV, the method to disable bars
	 * is different and more complex, so will add later.
	 */
	if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV)
		return;

	dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_0);
	dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_1);
	dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_ROM_ADDRESS1);
}

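/*
 * RC mode init: program the outbound windows, then briefly unlock the
 * read-only DBI registers to fix up the class code and header type
 * before hiding the controller's own BARs.
 */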
static void ls_pcie_setup_ctrl(struct ls_pcie *pcie)
{
	ls_pcie_setup_atu(pcie);

	dbi_writel(pcie, 1, PCIE_DBI_RO_WR_EN);
	ls_pcie_fix_class(pcie);
	ls_pcie_clear_multifunction(pcie);
	ls_pcie_drop_msg_tlp(pcie);
	dbi_writel(pcie, 0, PCIE_DBI_RO_WR_EN);

	ls_pcie_disable_bars(pcie);
}

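/*
 * EP mode init: map BAR0/1/2/4 inbound onto buffers carved out of
 * CONFIG_SYS_PCI_EP_MEMORY_BASE, and open one outbound MEM window over
 * the "config" resource so the local CPU can reach host memory.
 */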
static void ls_pcie_ep_setup_atu(struct ls_pcie *pcie)
{
	u64 phys = CONFIG_SYS_PCI_EP_MEMORY_BASE;

	/* ATU 0 : INBOUND : map BAR0 */
	ls_pcie_atu_inbound_set(pcie, 0, 0, phys);
	/* ATU 1 : INBOUND : map BAR1 */
	phys += PCIE_BAR1_SIZE;
	ls_pcie_atu_inbound_set(pcie, 1, 1, phys);
	/* ATU 2 : INBOUND : map BAR2 */
	phys += PCIE_BAR2_SIZE;
	ls_pcie_atu_inbound_set(pcie, 2, 2, phys);
	/* ATU 3 : INBOUND : map BAR4 */
	phys = CONFIG_SYS_PCI_EP_MEMORY_BASE + PCIE_BAR4_SIZE;
	ls_pcie_atu_inbound_set(pcie, 3, 4, phys);

	/* ATU 0 : OUTBOUND : map MEM */
	ls_pcie_atu_outbound_set(pcie, 0,
				 PCIE_ATU_TYPE_MEM,
				 pcie->cfg_res.start,
				 0,
				 CONFIG_SYS_PCI_MEMORY_SIZE);
}

/* BAR0 and BAR1 are 32-bit; BAR2 and BAR4 are 64-bit */
static void ls_pcie_ep_setup_bar(void *bar_base, int bar, u32 size)
{
	/* The smallest inbound window is 4 KiB */
	if (size < 4 * 1024)
		return;

	switch (bar) {
	case 0:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_0);
		break;
	case 1:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_1);
		break;
	case 2:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_2);
		writel(0, bar_base + PCI_BASE_ADDRESS_3);
		break;
	case 4:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_4);
		writel(0, bar_base + PCI_BASE_ADDRESS_5);
		break;
	default:
		break;
	}
}

static void ls_pcie_ep_setup_bars(void *bar_base)
{
	/* BAR0 - 32bit - 4K configuration */
	ls_pcie_ep_setup_bar(bar_base, 0, PCIE_BAR0_SIZE);
	/* BAR1 - 32bit - 8K MSI-X */
	ls_pcie_ep_setup_bar(bar_base, 1, PCIE_BAR1_SIZE);
	/* BAR2 - 64bit - 4K MEM descriptor */
	ls_pcie_ep_setup_bar(bar_base, 2, PCIE_BAR2_SIZE);
	/* BAR4 - 64bit - 1M MEM */
	ls_pcie_ep_setup_bar(bar_base, 4, PCIE_BAR4_SIZE);
}

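/* Signal that endpoint configuration is complete by setting PCIE_CONFIG_READY */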
static void ls_pcie_ep_enable_cfg(struct ls_pcie *pcie)
{
	ctrl_writel(pcie, PCIE_CONFIG_READY, PCIE_PF_CONFIG);
}

static void ls_pcie_setup_ep(struct ls_pcie *pcie)
{
	u32 sriov;

	sriov = readl(pcie->dbi + PCIE_SRIOV);
	if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV) {
		int pf, vf;

		for (pf = 0; pf < PCIE_PF_NUM; pf++) {
			for (vf = 0; vf <= PCIE_VF_NUM; vf++) {
				ctrl_writel(pcie, PCIE_LCTRL0_VAL(pf, vf),
					    PCIE_PF_VF_CTRL);

				ls_pcie_ep_setup_bars(pcie->dbi);
				ls_pcie_ep_setup_atu(pcie);
			}
		}
		/* Disable CFG2 */
		ctrl_writel(pcie, 0, PCIE_PF_VF_CTRL);
	} else {
		ls_pcie_ep_setup_bars(pcie->dbi + PCIE_NO_SRIOV_BAR_BASE);
		ls_pcie_ep_setup_atu(pcie);
	}

	ls_pcie_ep_enable_cfg(pcie);
}

static int ls_pcie_probe(struct udevice *dev)
{
	struct ls_pcie *pcie = dev_get_priv(dev);
	const void *fdt = gd->fdt_blob;
	int node = dev_of_offset(dev);
	u8 header_type;
	u16 link_sta;
	bool ep_mode;
	uint svr;
	int ret;
	fdt_size_t cfg_size;

	pcie->bus = dev;

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "dbi", &pcie->dbi_res);
	if (ret) {
		printf("ls-pcie: resource \"dbi\" not found\n");
		return ret;
	}

	pcie->idx = (pcie->dbi_res.start - PCIE_SYS_BASE_ADDR) / PCIE_CCSR_SIZE;

	list_add(&pcie->list, &ls_pcie_list);

	pcie->enabled = is_serdes_configured(PCIE_SRDS_PRTCL(pcie->idx));
	if (!pcie->enabled) {
		printf("PCIe%d: %s disabled\n", pcie->idx, dev->name);
		return 0;
	}

	pcie->dbi = map_physmem(pcie->dbi_res.start,
				fdt_resource_size(&pcie->dbi_res),
				MAP_NOCACHE);

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "lut", &pcie->lut_res);
	if (!ret)
		pcie->lut = map_physmem(pcie->lut_res.start,
					fdt_resource_size(&pcie->lut_res),
					MAP_NOCACHE);

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "ctrl", &pcie->ctrl_res);
	if (!ret)
		pcie->ctrl = map_physmem(pcie->ctrl_res.start,
					 fdt_resource_size(&pcie->ctrl_res),
					 MAP_NOCACHE);
	if (!pcie->ctrl)
		pcie->ctrl = pcie->lut;

	if (!pcie->ctrl) {
		printf("%s: resource \"ctrl\" not found\n", dev->name);
		return -1;
	}

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "config", &pcie->cfg_res);
	if (ret) {
		printf("%s: resource \"config\" not found\n", dev->name);
		return ret;
	}

	/*
	 * Fix the pcie memory map address and PF control registers address
	 * for LS2088A series SoCs
	 */
	svr = get_svr();
	svr = (svr >> SVR_VAR_PER_SHIFT) & 0xFFFFFE;
	if (svr == SVR_LS2088A || svr == SVR_LS2084A ||
	    svr == SVR_LS2048A || svr == SVR_LS2044A ||
	    svr == SVR_LS2081A || svr == SVR_LS2041A) {
		cfg_size = fdt_resource_size(&pcie->cfg_res);
		pcie->cfg_res.start = LS2088A_PCIE1_PHYS_ADDR +
				      LS2088A_PCIE_PHYS_SIZE * pcie->idx;
		pcie->cfg_res.end = pcie->cfg_res.start + cfg_size;
		pcie->ctrl = pcie->lut + 0x40000;
	}

	pcie->cfg0 = map_physmem(pcie->cfg_res.start,
				 fdt_resource_size(&pcie->cfg_res),
				 MAP_NOCACHE);
	pcie->cfg1 = pcie->cfg0 + fdt_resource_size(&pcie->cfg_res) / 2;

	pcie->big_endian = fdtdec_get_bool(fdt, node, "big-endian");

	debug("%s dbi:%lx lut:%lx ctrl:0x%lx cfg0:0x%lx, big-endian:%d\n",
	      dev->name, (unsigned long)pcie->dbi, (unsigned long)pcie->lut,
	      (unsigned long)pcie->ctrl, (unsigned long)pcie->cfg0,
	      pcie->big_endian);

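	/*
	 * The controller comes up as either root complex or endpoint
	 * depending on board configuration; a type 0 (normal) header
	 * indicates endpoint mode.
	 */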
	header_type = readb(pcie->dbi + PCI_HEADER_TYPE);
	ep_mode = (header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL;
	printf("PCIe%u: %s %s", pcie->idx, dev->name,
	       ep_mode ? "Endpoint" : "Root Complex");

	if (ep_mode)
		ls_pcie_setup_ep(pcie);
	else
		ls_pcie_setup_ctrl(pcie);

	if (!ls_pcie_link_up(pcie)) {
		/* Let the user know there's no PCIe link */
		printf(": no link\n");
		return 0;
	}

	/* Print the negotiated PCIe link width and speed */
	link_sta = readw(pcie->dbi + PCIE_LINK_STA);
	printf(": x%d gen%d\n", (link_sta & PCIE_LINK_WIDTH_MASK) >> 4,
	       link_sta & PCIE_LINK_SPEED_MASK);

	return 0;
}

static const struct dm_pci_ops ls_pcie_ops = {
	.read_config = ls_pcie_read_config,
	.write_config = ls_pcie_write_config,
};

static const struct udevice_id ls_pcie_ids[] = {
	{ .compatible = "fsl,ls-pcie" },
	{ }
};

U_BOOT_DRIVER(pci_layerscape) = {
	.name = "pci_layerscape",
	.id = UCLASS_PCI,
	.of_match = ls_pcie_ids,
	.ops = &ls_pcie_ops,
	.probe = ls_pcie_probe,
	.priv_auto_alloc_size = sizeof(struct ls_pcie),
};