// SPDX-License-Identifier: GPL-2.0
/*
 * SynQuacer PCIe host driver
 *
 * Based on drivers/pci/pcie_ecam_generic.c
 *
 * Copyright (C) 2016 Imagination Technologies
 * Copyright (C) 2021 Linaro Ltd.
 */

#include <common.h>
#include <dm.h>
#include <pci.h>
#include <log.h>

#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>

/* iATU registers */
#define IATU_VIEWPORT_OFF 0x900
#define IATU_VIEWPORT_INBOUND BIT(31)
#define IATU_VIEWPORT_OUTBOUND 0
#define IATU_VIEWPORT_REGION_INDEX(idx) ((idx) & 7)

#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0 0x904
#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_MEM 0x0
#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_IO 0x2
#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_CFG0 0x4
#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_CFG1 0x5
#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TH BIT(12)

#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0 0x908
#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_REGION_EN BIT(31)
#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_CFG_SHIFT_MODE BIT(28)
#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_MSG_CODE_32BIT 0xF
#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_MSG_CODE_64BIT 0xFF

#define IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 0x90C
#define IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 0x910
#define IATU_LIMIT_ADDR_OFF_OUTBOUND_0 0x914
#define IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 0x918
#define IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 0x91C

/* Clock and resets */
#define CORE_CONTROL 0x000
#define APP_LTSSM_ENABLE BIT(4)
#define DEVICE_TYPE (BIT(3) | BIT(2) | BIT(1) | BIT(0))

#define AXI_CLK_STOP 0x004
#define DBI_ACLK_STOP BIT(8)
#define SLV_ACLK_STOP BIT(4)
#define MSTR_ACLK_STOP BIT(0)
#define DBI_CSYSREQ_REG BIT(9)
#define SLV_CSYSREQ_REG BIT(5)
#define MSTR_CSYSREQ_REG BIT(1)

#define RESET_CONTROL_1 0x00C
#define PERST_N_O_REG BIT(5)
#define PERST_N_I_REG BIT(4)
#define BUTTON_RST_N_REG BIT(1)
#define PWUP_RST_N_REG BIT(0)

#define RESET_CONTROL_2 0x010

#define RESET_SELECT_1 0x014
#define SQU_RST_SEL BIT(29)
#define PHY_RST_SEL BIT(28)
#define PWR_RST_SEL BIT(24)
#define STI_RST_SEL BIT(20)
#define N_STI_RST_SEL BIT(16)
#define CORE_RST_SEL BIT(12)
#define PERST_SEL BIT(4)
#define BUTTON_RST_SEL BIT(1)
#define PWUP_RST_SEL BIT(0)

#define RESET_SELECT_2 0x018
#define DBI_ARST_SEL BIT(8)
#define SLV_ARST_SEL BIT(4)
#define MSTR_ARST_SEL BIT(0)

#define EM_CONTROL 0x030
#define PRE_DET_STT_REG BIT(4)

#define EM_SELECT 0x034
#define PRE_DET_STT_SEL BIT(4)

#define PM_CONTROL_2 0x050
#define SYS_AUX_PWR_DET BIT(8)

#define PHY_CONFIG_COM_6 0x114
#define PIPE_PORT_SEL GENMASK(1, 0)

#define LINK_MONITOR 0x210
#define SMLH_LINK_UP BIT(0)

#define LINK_CAPABILITIES_REG 0x07C
#define PCIE_CAP_MAX_LINK_WIDTH GENMASK(7, 4)
#define PCIE_CAP_MAX_LINK_SPEED GENMASK(3, 0)

#define LINK_CONTROL_LINK_STATUS_REG 0x080
#define PCIE_CAP_NEGO_LINK_WIDTH GENMASK(23, 20)
#define PCIE_CAP_LINK_SPEED GENMASK(19, 16)

#define TYPE1_CLASS_CODE_REV_ID_REG 0x008
#define BASE_CLASS_CODE 0xFF000000
#define BASE_CLASS_CODE_VALUE 0x06
#define SUBCLASS_CODE 0x00FF0000
#define SUBCLASS_CODE_VALUE 0x04
#define PROGRAM_INTERFACE 0x0000FF00
#define PROGRAM_INTERFACE_VALUE 0x00

#define GEN2_CONTROL_OFF 0x80c
#define DIRECT_SPEED_CHANGE BIT(17)

#define MISC_CONTROL_1_OFF 0x8BC
#define DBI_RO_WR_EN BIT(0)

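/* OR @val into the 32-bit register at @base + @offs (read-modify-write) */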
static void or_writel(void *base, u32 offs, u32 val)
{
	writel(readl(base + offs) | val, base + offs);
}

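/*
 * Write @val into the register field selected by @mask at @base + @offs.
 * @val is shifted up to the lowest set bit of @mask; bits outside @mask are
 * preserved, unless @mask is ~0, in which case the register is overwritten.
 */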
static void masked_writel(void *base, u32 offs, u32 mask, u32 val)
{
	u32 data;
	int shift = ffs(mask); /* Note that ffs() returns 1 for 0x1 */

	if (val && shift > 1)
		val <<= shift - 1;

	if (mask != ~0)
		data = (readl(base + offs) & ~mask) | val;
	else
		data = val;

	writel(data, base + offs);
}

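/*
 * Read the register at @base + @offs and return the field selected by @mask,
 * shifted down so that its least significant bit is bit 0.
 */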
static u32 masked_readl(void *base, u32 offs, u32 mask)
{
	u32 data;
	int shift = ffs(mask); /* Note that ffs() returns 1 for 0x1 */

	data = readl(base + offs);

	if (mask != ~0)
		data &= mask;
	if (shift > 1)
		data >>= shift - 1;

	return data;
}

/*
 * SynQuacer's PCIe RC is expected to be initialized by firmware (including
 * U-Boot), so the devicetree does not describe the controller's register
 * blocks.
 *
 * Thus, this driver initializes the PCIe RC with fixed addresses.
 */

#define SYNQUACER_PCI_SEG0_CONFIG_BASE 0x60000000
#define SYNQUACER_PCI_SEG0_CONFIG_SIZE 0x07f00000
#define SYNQUACER_PCI_SEG0_DBI_BASE 0x583d0000
#define SYNQUACER_PCI_SEG0_EXS_BASE 0x58390000

#define SYNQUACER_PCI_SEG1_CONFIG_BASE 0x70000000
#define SYNQUACER_PCI_SEG1_CONFIG_SIZE 0x07f00000
#define SYNQUACER_PCI_SEG1_DBI_BASE 0x583c0000
#define SYNQUACER_PCI_SEG1_EXS_BASE 0x58380000

#define SIZE_16KB 0x00004000
#define SIZE_64KB 0x00010000
#define SIZE_1MB 0x00100000

#define SYNQUACER_PCI_DBI_SIZE SIZE_16KB
#define SYNQUACER_PCI_EXS_SIZE SIZE_64KB

#define NUM_SQ_PCI_RC 2

static const struct synquacer_pcie_base {
	phys_addr_t cfg_base;
	phys_addr_t dbi_base;
	phys_addr_t exs_base;
} synquacer_pci_bases[NUM_SQ_PCI_RC] = {
	{
		.cfg_base = SYNQUACER_PCI_SEG0_CONFIG_BASE,
		.dbi_base = SYNQUACER_PCI_SEG0_DBI_BASE,
		.exs_base = SYNQUACER_PCI_SEG0_EXS_BASE,
	}, {
		.cfg_base = SYNQUACER_PCI_SEG1_CONFIG_BASE,
		.dbi_base = SYNQUACER_PCI_SEG1_DBI_BASE,
		.exs_base = SYNQUACER_PCI_SEG1_EXS_BASE,
	},
};

/**
 * struct synquacer_ecam_pcie - synquacer_ecam PCIe controller state
 * @cfg_base: Base address of the memory-mapped (ECAM) configuration space
 * @size: Size of the configuration space
 * @dbi_base: Base address of the DBI register block
 * @exs_base: Base address of the EXS (clock and reset control) register block
 * @first_busno: Bus number assigned to this root complex
 * @mem: 32-bit memory window
 * @io: I/O window
 * @mem64: 64-bit memory window
 */
struct synquacer_ecam_pcie {
	void *cfg_base;
	pci_size_t size;
	void *dbi_base;
	void *exs_base;
	int first_busno;

	struct pci_region mem;
	struct pci_region io;
	struct pci_region mem64;
};

DECLARE_GLOBAL_DATA_PTR;

/**
 * pci_synquacer_ecam_conf_address() - Calculate the address of a config access
 * @bus: Pointer to the PCI bus
 * @bdf: Identifies the PCIe device to access
 * @offset: The offset into the device's configuration space
 * @paddress: Pointer to the pointer to write the calculated address to
 *
 * Calculates the address that should be accessed to perform a PCIe
 * configuration space access for a given device identified by the PCI bus
 * @bus and the bus, device & function numbers in @bdf. If access to the
 * device is not valid then the function will return an error code. Otherwise
 * the address to access will be written to the pointer pointed to by
 * @paddress.
 */
static int pci_synquacer_ecam_conf_address(const struct udevice *bus,
					   pci_dev_t bdf, uint offset,
					   void **paddress)
{
	struct synquacer_ecam_pcie *pcie = dev_get_priv(bus);
	void *addr;

	addr = pcie->cfg_base;
	addr += PCIE_ECAM_OFFSET(PCI_BUS(bdf) - pcie->first_busno,
				 PCI_DEV(bdf), PCI_FUNC(bdf), offset);
	*paddress = addr;

	return 0;
}

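/**
 * pci_synquacer_ecam_addr_valid() - Check whether a config access is valid
 * @bus: Pointer to the PCI bus
 * @bdf: Identifies the PCIe device to access
 *
 * Return: true if @bdf falls within the bus range covered by this
 * controller's ECAM window and is not a phantom copy of the root port on the
 * first bus, false otherwise.
 */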
static bool pci_synquacer_ecam_addr_valid(const struct udevice *bus,
					  pci_dev_t bdf)
{
	struct synquacer_ecam_pcie *pcie = dev_get_priv(bus);
	int num_buses = DIV_ROUND_UP(pcie->size, 1 << 16);

	/*
	 * The Synopsys DesignWare PCIe controller in ECAM mode will not filter
	 * type 0 config TLPs sent to devices 1 and up on its downstream port,
	 * resulting in devices appearing multiple times on bus 0 unless we
	 * filter out those accesses here.
	 */
	if (PCI_BUS(bdf) == pcie->first_busno && PCI_DEV(bdf) > 0)
		return false;

	return (PCI_BUS(bdf) >= pcie->first_busno &&
		PCI_BUS(bdf) < pcie->first_busno + num_buses);
}

/**
 * pci_synquacer_ecam_read_config() - Read from configuration space
 * @bus: Pointer to the PCI bus
 * @bdf: Identifies the PCIe device to access
 * @offset: The offset into the device's configuration space
 * @valuep: A pointer at which to store the read value
 * @size: Indicates the size of access to perform
 *
 * Read a value of size @size from offset @offset within the configuration
 * space of the device identified by the bus, device & function numbers in @bdf
 * on the PCI bus @bus.
 */
static int pci_synquacer_ecam_read_config(const struct udevice *bus,
					  pci_dev_t bdf, uint offset,
					  ulong *valuep, enum pci_size_t size)
{
	if (!pci_synquacer_ecam_addr_valid(bus, bdf)) {
		*valuep = pci_get_ff(size);
		return 0;
	}

	return pci_generic_mmap_read_config(bus, pci_synquacer_ecam_conf_address,
					    bdf, offset, valuep, size);
}

/**
 * pci_synquacer_ecam_write_config() - Write to configuration space
 * @bus: Pointer to the PCI bus
 * @bdf: Identifies the PCIe device to access
 * @offset: The offset into the device's configuration space
 * @value: The value to write
 * @size: Indicates the size of access to perform
 *
 * Write the value @value of size @size to offset @offset within the
 * configuration space of the device identified by the bus, device & function
 * numbers in @bdf on the PCI bus @bus.
 */
static int pci_synquacer_ecam_write_config(struct udevice *bus, pci_dev_t bdf,
					   uint offset, ulong value,
					   enum pci_size_t size)
{
	if (!pci_synquacer_ecam_addr_valid(bus, bdf))
		return 0;

	return pci_generic_mmap_write_config(bus, pci_synquacer_ecam_conf_address,
					     bdf, offset, value, size);
}

/**
 * pci_synquacer_ecam_of_to_plat() - Translate from DT to device state
 * @dev: A pointer to the device being operated on
 *
 * Translate relevant data from the device tree pertaining to device @dev into
 * state that the driver will later make use of. This state is stored in the
 * device's private data structure.
 *
 * Return: 0 on success, else a negative error code
 */
static int pci_synquacer_ecam_of_to_plat(struct udevice *dev)
{
	struct synquacer_ecam_pcie *pcie = dev_get_priv(dev);
	struct fdt_resource reg_res;
	int i, err;

	debug("%s: called for %s\n", __func__, dev->name);

	err = fdt_get_resource(gd->fdt_blob, dev_of_offset(dev), "reg",
			       0, &reg_res);
	if (err < 0) {
		pr_err("\"reg\" resource not found\n");
		return err;
	}

	/* Find the DBI/EXS base address pair matching this ECAM base */
	for (i = 0; i < NUM_SQ_PCI_RC; i++) {
		if (synquacer_pci_bases[i].cfg_base == reg_res.start)
			break;
	}
	if (i == NUM_SQ_PCI_RC) {
		pr_err("Unknown ECAM base address %lx.\n",
		       (unsigned long)reg_res.start);
		return -ENOENT;
	}
	pcie->dbi_base = map_physmem(synquacer_pci_bases[i].dbi_base,
				     SYNQUACER_PCI_DBI_SIZE, MAP_NOCACHE);
	if (!pcie->dbi_base) {
		pr_err("Failed to map DBI for %s\n", dev->name);
		return -ENOMEM;
	}

	pcie->exs_base = map_physmem(synquacer_pci_bases[i].exs_base,
				     SYNQUACER_PCI_EXS_SIZE, MAP_NOCACHE);
	if (!pcie->exs_base) {
		pr_err("Failed to map EXS for %s\n", dev->name);
		return -ENOMEM;
	}

	pcie->size = fdt_resource_size(&reg_res);
	pcie->cfg_base = map_physmem(reg_res.start, pcie->size, MAP_NOCACHE);
	if (!pcie->cfg_base) {
		pr_err("Failed to map config space for %s\n", dev->name);
		return -ENOMEM;
	}
	debug("mappings DBI: %p EXS: %p CFG: %p\n",
	      pcie->dbi_base, pcie->exs_base, pcie->cfg_base);

	return 0;
}

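/*
 * Steps 1-3 of the bring-up sequence: assert the PHY/link resets (PERST#,
 * power-up and button resets), keep the LTSSM disabled and set the device
 * type to root complex.
 */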
static void pci_synquacer_pre_init(struct synquacer_ecam_pcie *pcie)
{
	void *base = pcie->exs_base;

	masked_writel(base, EM_SELECT, PRE_DET_STT_SEL, 0);
	masked_writel(base, EM_CONTROL, PRE_DET_STT_REG, 0);
	masked_writel(base, EM_CONTROL, PRE_DET_STT_REG, 1);

	/* 1: Assert all PHY / LINK resets */
	masked_writel(base, RESET_SELECT_1, PERST_SEL, 0);
	masked_writel(base, RESET_CONTROL_1, PERST_N_I_REG, 0);
	masked_writel(base, RESET_CONTROL_1, PERST_N_O_REG, 0);

	/* Device reset (PERST#) takes effect after device_type (RC) is set */
	masked_writel(base, RESET_SELECT_1, PWUP_RST_SEL, 0);
	masked_writel(base, RESET_CONTROL_1, PWUP_RST_N_REG, 0);
	masked_writel(base, RESET_SELECT_1, BUTTON_RST_SEL, 0);
	masked_writel(base, RESET_CONTROL_1, BUTTON_RST_N_REG, 0);
	masked_writel(base, RESET_SELECT_1, PWR_RST_SEL, 1);
	masked_writel(base, RESET_SELECT_2, MSTR_ARST_SEL, 1);
	masked_writel(base, RESET_SELECT_2, SLV_ARST_SEL, 1);
	masked_writel(base, RESET_SELECT_2, DBI_ARST_SEL, 1);
	masked_writel(base, RESET_SELECT_1, CORE_RST_SEL, 1);
	masked_writel(base, RESET_SELECT_1, STI_RST_SEL, 1);
	masked_writel(base, RESET_SELECT_1, N_STI_RST_SEL, 1);
	masked_writel(base, RESET_SELECT_1, SQU_RST_SEL, 1);
	masked_writel(base, RESET_SELECT_1, PHY_RST_SEL, 1);

	/* 2: Set P<n>_app_ltssm_enable='0' for reprogramming before linkup. */
	masked_writel(base, CORE_CONTROL, APP_LTSSM_ENABLE, 0);

	/* 3: Set device_type (RC) */
	masked_writel(base, CORE_CONTROL, DEVICE_TYPE, 4);
}

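/*
 * Update the DBI registers (step 19 of the bring-up sequence): advertise an
 * x4 / Gen2 link and set the class code to PCI-to-PCI bridge. DBI_RO_WR_EN
 * makes the read-only registers writable while they are updated.
 */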
static void pci_synquacer_dbi_init(void *dbi_base)
{
	masked_writel(dbi_base, MISC_CONTROL_1_OFF, DBI_RO_WR_EN, 1);
	/* 4 lanes */
	masked_writel(dbi_base, LINK_CAPABILITIES_REG,
		      PCIE_CAP_MAX_LINK_WIDTH, 4);
	/* Gen 2 */
	masked_writel(dbi_base, LINK_CAPABILITIES_REG,
		      PCIE_CAP_MAX_LINK_SPEED, 2);

	masked_writel(dbi_base, TYPE1_CLASS_CODE_REV_ID_REG,
		      BASE_CLASS_CODE, BASE_CLASS_CODE_VALUE);
	masked_writel(dbi_base, TYPE1_CLASS_CODE_REV_ID_REG,
		      SUBCLASS_CODE, SUBCLASS_CODE_VALUE);
	masked_writel(dbi_base, TYPE1_CLASS_CODE_REV_ID_REG,
		      PROGRAM_INTERFACE, PROGRAM_INTERFACE_VALUE);

	masked_writel(dbi_base, MISC_CONTROL_1_OFF, DBI_RO_WR_EN, 0);
}

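/*
 * Program outbound iATU region @index to translate the CPU address range
 * [@cpu_base, @cpu_base + @size) to PCI addresses starting at @pci_base,
 * using TLP type @type and the extra region control 2 bits in @flags.
 */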
static void pcie_sq_prog_outbound_atu(void *dbi_base, int index,
				      u64 cpu_base, u64 pci_base, u64 size,
				      u32 type, u32 flags)
{
	debug("%s: %p, %d, %llx, %llx, %llx, %x, %x\n", __func__,
	      dbi_base, index, cpu_base, pci_base, size, type, flags);

	writel(IATU_VIEWPORT_OUTBOUND | IATU_VIEWPORT_REGION_INDEX(index),
	       dbi_base + IATU_VIEWPORT_OFF);

	writel((u32)(cpu_base & 0xffffffff),
	       dbi_base + IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0);
	writel((u32)(cpu_base >> 32),
	       dbi_base + IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0);
	writel((u32)(cpu_base + size - 1),
	       dbi_base + IATU_LIMIT_ADDR_OFF_OUTBOUND_0);

	writel((u32)(pci_base & 0xffffffff),
	       dbi_base + IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0);
	writel((u32)(pci_base >> 32),
	       dbi_base + IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0);

	writel(type, dbi_base + IATU_REGION_CTRL_1_OFF_OUTBOUND_0);
	writel(IATU_REGION_CTRL_2_OFF_OUTBOUND_0_REGION_EN | flags,
	       dbi_base + IATU_REGION_CTRL_2_OFF_OUTBOUND_0);
}

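/*
 * Second stage of the bring-up sequence: de-assert PERST#, enable the AXI
 * clocks, release the remaining resets, program the DBI registers and the
 * outbound iATU regions, then enable the LTSSM to start link training.
 */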
static void pci_synquacer_post_init(struct synquacer_ecam_pcie *pcie)
{
	void *base = pcie->exs_base;

	/*
	 * 4: Set bifurcation (1 = disable, 4 = enable)
	 * 5: Supply reference clock (already done)
	 * 6: Wait 10 usec until the reference clock is stable
	 * 7: De-assert PERST#
	 */
	masked_writel(base, RESET_CONTROL_1, PERST_N_I_REG, 1);
	masked_writel(base, RESET_CONTROL_1, PERST_N_O_REG, 1);

	/* 8: Assert SYS_AUX_PWR_DET */
	masked_writel(base, PM_CONTROL_2, SYS_AUX_PWR_DET, 1);

	/* 9: Supply the following clocks */
	masked_writel(base, AXI_CLK_STOP, MSTR_CSYSREQ_REG, 1);
	masked_writel(base, AXI_CLK_STOP, MSTR_ACLK_STOP, 0);
	masked_writel(base, AXI_CLK_STOP, SLV_CSYSREQ_REG, 1);
	masked_writel(base, AXI_CLK_STOP, SLV_ACLK_STOP, 0);
	masked_writel(base, AXI_CLK_STOP, DBI_CSYSREQ_REG, 1);
	masked_writel(base, AXI_CLK_STOP, DBI_ACLK_STOP, 0);

	/*
	 * 10: De-assert PHY reset
	 * 11: De-assert LINK's PMC reset
	 */
	masked_writel(base, RESET_CONTROL_1, PWUP_RST_N_REG, 1);
	masked_writel(base, RESET_CONTROL_1, BUTTON_RST_N_REG, 1);

	/*
	 * 12: PHY auto
	 * 13: Wrapper auto
	 * 14-17: PHY auto
	 * 18: Wrapper auto
	 * 19: Update registers through DBI AXI Slave interface
	 */
	pci_synquacer_dbi_init(pcie->dbi_base);

	or_writel(pcie->dbi_base, PCI_COMMAND,
		  PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

	/* Force link speed change to Gen2 at link up */
	or_writel(pcie->dbi_base, GEN2_CONTROL_OFF, DIRECT_SPEED_CHANGE);

	/* Region 0: MMIO32 range */
	pcie_sq_prog_outbound_atu(pcie->dbi_base, 0,
				  pcie->mem.phys_start,
				  pcie->mem.bus_start,
				  pcie->mem.size,
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_MEM |
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TH,
				  IATU_REGION_CTRL_2_OFF_OUTBOUND_0_MSG_CODE_32BIT);

	/* Region 1: Type 0 config space */
	pcie_sq_prog_outbound_atu(pcie->dbi_base, 1,
				  (u64)pcie->cfg_base,
				  0,
				  SIZE_64KB,
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_CFG0,
				  IATU_REGION_CTRL_2_OFF_OUTBOUND_0_CFG_SHIFT_MODE);

	/* Region 2: Type 1 config space */
	pcie_sq_prog_outbound_atu(pcie->dbi_base, 2,
				  (u64)pcie->cfg_base + SIZE_64KB,
				  0,
				  (u64)pcie->io.phys_start - (u64)pcie->cfg_base - SIZE_64KB,
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_CFG1,
				  IATU_REGION_CTRL_2_OFF_OUTBOUND_0_CFG_SHIFT_MODE);

	/* Region 3: port I/O range */
	pcie_sq_prog_outbound_atu(pcie->dbi_base, 3,
				  pcie->io.phys_start,
				  pcie->io.bus_start,
				  pcie->io.size,
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_IO,
				  0);

	/* Region 4: MMIO64 range */
	pcie_sq_prog_outbound_atu(pcie->dbi_base, 4,
				  pcie->mem64.phys_start,
				  pcie->mem64.bus_start,
				  pcie->mem64.size,
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_MEM |
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TH,
				  IATU_REGION_CTRL_2_OFF_OUTBOUND_0_MSG_CODE_32BIT);

	/* Enable link */
	if (masked_readl(base, CORE_CONTROL, APP_LTSSM_ENABLE) == 0)
		masked_writel(base, CORE_CONTROL, APP_LTSSM_ENABLE, 1);
}

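/*
 * Probe: record the I/O, 32-bit and 64-bit memory windows from the
 * controller regions, then run the two-stage RC initialization with a
 * settling delay in between.
 */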
static int pci_synquacer_ecam_probe(struct udevice *dev)
{
	struct synquacer_ecam_pcie *pcie = dev_get_priv(dev);
	struct udevice *ctlr = pci_get_controller(dev);
	struct pci_controller *hose = dev_get_uclass_priv(ctlr);

	debug("Probe synquacer pcie for bus %d\n", dev_seq(dev));
	pcie->first_busno = dev_seq(dev);

	/* Store the I/O and MEM window settings for configuring the ATU */
	pcie->io.phys_start = hose->regions[0].phys_start; /* IO base */
	pcie->io.bus_start = hose->regions[0].bus_start;   /* IO_bus_addr */
	pcie->io.size = hose->regions[0].size;             /* IO size */

	pcie->mem.phys_start = hose->regions[1].phys_start; /* MEM base */
	pcie->mem.bus_start = hose->regions[1].bus_start;   /* MEM_bus_addr */
	pcie->mem.size = hose->regions[1].size;             /* MEM size */

	pcie->mem64.phys_start = hose->regions[2].phys_start; /* MEM64 base */
	pcie->mem64.bus_start = hose->regions[2].bus_start;   /* MEM64_bus_addr */
	pcie->mem64.size = hose->regions[2].size;             /* MEM64 size */

	pci_synquacer_pre_init(pcie);

	mdelay(150);

	pci_synquacer_post_init(pcie);

	/* Give the PCIe bus some time to settle before scanning */
	mdelay(100);

	return 0;
}

static const struct dm_pci_ops pci_synquacer_ecam_ops = {
	.read_config = pci_synquacer_ecam_read_config,
	.write_config = pci_synquacer_ecam_write_config,
};

static const struct udevice_id pci_synquacer_ecam_ids[] = {
	{ .compatible = "socionext,synquacer-pcie-ecam" },
	{ }
};

U_BOOT_DRIVER(pci_synquacer_ecam) = {
	.name = "pci_synquacer_ecam",
	.id = UCLASS_PCI,
	.of_match = pci_synquacer_ecam_ids,
	.ops = &pci_synquacer_ecam_ops,
	.probe = pci_synquacer_ecam_probe,
	.of_to_plat = pci_synquacer_ecam_of_to_plat,
	.priv_auto = sizeof(struct synquacer_ecam_pcie),
};