// SPDX-License-Identifier: GPL-2.0
/*
 * SynQuacer PCIE host driver
 *
 * Based on drivers/pci/pcie_ecam_generic.c
 *
 * Copyright (C) 2016 Imagination Technologies
 * Copyright (C) 2021 Linaro Ltd.
 */

#include <common.h>
#include <dm.h>
#include <pci.h>
#include <log.h>

#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>

/* iATU registers */
#define IATU_VIEWPORT_OFF 0x900
#define IATU_VIEWPORT_INBOUND BIT(31)
#define IATU_VIEWPORT_OUTBOUND 0
#define IATU_VIEWPORT_REGION_INDEX(idx) ((idx) & 7)

#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0 0x904
#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_MEM 0x0
#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_IO 0x2
#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_CFG0 0x4
#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_CFG1 0x5
#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TH BIT(12)

#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0 0x908
#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_REGION_EN BIT(31)
#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_CFG_SHIFT_MODE BIT(28)
#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_MSG_CODE_32BIT 0xF
#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_MSG_CODE_64BIT 0xFF

#define IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 0x90C
#define IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 0x910
#define IATU_LIMIT_ADDR_OFF_OUTBOUND_0 0x914
#define IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 0x918
#define IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 0x91C

/* Clock and resets */
#define CORE_CONTROL 0x000
#define APP_LTSSM_ENABLE BIT(4)
#define DEVICE_TYPE (BIT(3) | BIT(2) | BIT(1) | BIT(0))

#define AXI_CLK_STOP 0x004
#define DBI_ACLK_STOP BIT(8)
#define SLV_ACLK_STOP BIT(4)
#define MSTR_ACLK_STOP BIT(0)
#define DBI_CSYSREQ_REG BIT(9)
#define SLV_CSYSREQ_REG BIT(5)
#define MSTR_CSYSREQ_REG BIT(1)

#define RESET_CONTROL_1 0x00C
#define PERST_N_O_REG BIT(5)
#define PERST_N_I_REG BIT(4)
#define BUTTON_RST_N_REG BIT(1)
#define PWUP_RST_N_REG BIT(0)

#define RESET_CONTROL_2 0x010

#define RESET_SELECT_1 0x014
#define SQU_RST_SEL BIT(29)
#define PHY_RST_SEL BIT(28)
#define PWR_RST_SEL BIT(24)
#define STI_RST_SEL BIT(20)
#define N_STI_RST_SEL BIT(16)
#define CORE_RST_SEL BIT(12)
#define PERST_SEL BIT(4)
#define BUTTON_RST_SEL BIT(1)
#define PWUP_RST_SEL BIT(0)

#define RESET_SELECT_2 0x018
#define DBI_ARST_SEL BIT(8)
#define SLV_ARST_SEL BIT(4)
#define MSTR_ARST_SEL BIT(0)

#define EM_CONTROL 0x030
#define PRE_DET_STT_REG BIT(4)

#define EM_SELECT 0x034
#define PRE_DET_STT_SEL BIT(4)

#define PM_CONTROL_2 0x050
#define SYS_AUX_PWR_DET BIT(8)

#define PHY_CONFIG_COM_6 0x114
#define PIPE_PORT_SEL GENMASK(1, 0)

#define LINK_MONITOR 0x210
#define SMLH_LINK_UP BIT(0)

#define LINK_CAPABILITIES_REG 0x07C
#define PCIE_CAP_MAX_LINK_WIDTH GENMASK(7, 4)
#define PCIE_CAP_MAX_LINK_SPEED GENMASK(3, 0)

#define LINK_CONTROL_LINK_STATUS_REG 0x080
#define PCIE_CAP_NEGO_LINK_WIDTH GENMASK(23, 20)
#define PCIE_CAP_LINK_SPEED GENMASK(19, 16)

#define TYPE1_CLASS_CODE_REV_ID_REG 0x008
#define BASE_CLASS_CODE 0xFF000000
#define BASE_CLASS_CODE_VALUE 0x06
#define SUBCLASS_CODE 0x00FF0000
#define SUBCLASS_CODE_VALUE 0x04
#define PROGRAM_INTERFACE 0x0000FF00
#define PROGRAM_INTERFACE_VALUE 0x00

#define GEN2_CONTROL_OFF 0x80c
#define DIRECT_SPEED_CHANGE BIT(17)

#define MISC_CONTROL_1_OFF 0x8BC
#define DBI_RO_WR_EN BIT(0)

static void or_writel(void *base, u32 offs, u32 val)
{
	writel(readl(base + offs) | val, base + offs);
}

static void masked_writel(void *base, u32 offs, u32 mask, u32 val)
{
	u32 data;
	int shift = ffs(mask); /* Note that ffs() returns 1 for 0x1 */

	if (val && shift > 1)
		val <<= shift - 1;

	if (mask != ~0)
		data = (readl(base + offs) & ~mask) | val;
	else
		data = val;

	writel(data, base + offs);
}

static u32 masked_readl(void *base, u32 offs, u32 mask)
{
	u32 data;
	int shift = ffs(mask); /* Note that ffs() returns 1 for 0x1 */

	data = readl(base + offs);

	if (mask != ~0)
		data &= mask;
	if (shift > 1)
		data >>= shift - 1;

	return data;
}
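
/*
 * Illustrative usage of the helpers above (for explanation only, not part
 * of the init flow): @mask names a register field and @val is the field
 * value, so e.g.
 *
 *	masked_writel(base, CORE_CONTROL, DEVICE_TYPE, 4);
 *
 * read-modify-writes bits [3:0] of CORE_CONTROL to 0x4, while
 *
 *	masked_readl(base, LINK_MONITOR, SMLH_LINK_UP);
 *
 * returns 0 or 1 depending on bit 0 of LINK_MONITOR.
 */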

/*
 * Since SynQuacer's PCIe RC is expected to be initialized in the
 * firmware (including U-Boot), the devicetree does not describe its
 * control register blocks.
 *
 * Thus, this driver initializes the PCIe RC with fixed addresses.
 */

#define SYNQUACER_PCI_SEG0_CONFIG_BASE 0x60000000
#define SYNQUACER_PCI_SEG0_CONFIG_SIZE 0x07f00000
#define SYNQUACER_PCI_SEG0_DBI_BASE 0x583d0000
#define SYNQUACER_PCI_SEG0_EXS_BASE 0x58390000

#define SYNQUACER_PCI_SEG1_CONFIG_BASE 0x70000000
#define SYNQUACER_PCI_SEG1_CONFIG_SIZE 0x07f00000
#define SYNQUACER_PCI_SEG1_DBI_BASE 0x583c0000
#define SYNQUACER_PCI_SEG1_EXS_BASE 0x58380000

#define SIZE_16KB 0x00004000
#define SIZE_64KB 0x00010000
#define SIZE_1MB 0x00100000

#define SYNQUACER_PCI_DBI_SIZE SIZE_16KB
#define SYNQUACER_PCI_EXS_SIZE SIZE_64KB

#define NUM_SQ_PCI_RC 2

static const struct synquacer_pcie_base {
	phys_addr_t cfg_base;
	phys_addr_t dbi_base;
	phys_addr_t exs_base;
} synquacer_pci_bases[NUM_SQ_PCI_RC] = {
	{
		.cfg_base = SYNQUACER_PCI_SEG0_CONFIG_BASE,
		.dbi_base = SYNQUACER_PCI_SEG0_DBI_BASE,
		.exs_base = SYNQUACER_PCI_SEG0_EXS_BASE,
	}, {
		.cfg_base = SYNQUACER_PCI_SEG1_CONFIG_BASE,
		.dbi_base = SYNQUACER_PCI_SEG1_DBI_BASE,
		.exs_base = SYNQUACER_PCI_SEG1_EXS_BASE,
	},
};

/**
 * struct synquacer_ecam_pcie - synquacer_ecam PCIe controller state
 * @cfg_base: The base address of the memory-mapped configuration space
 * @size: The size of the memory-mapped configuration space
 * @dbi_base: The base address of the DesignWare DBI register block
 * @exs_base: The base address of the EXS (clock and reset control) block
 * @first_busno: The first bus number handled by this controller
 * @mem: The 32-bit memory (MMIO32) window
 * @io: The port I/O window
 * @mem64: The 64-bit memory (MMIO64) window
 */
struct synquacer_ecam_pcie {
	void *cfg_base;
	pci_size_t size;
	void *dbi_base;
	void *exs_base;
	int first_busno;

	struct pci_region mem;
	struct pci_region io;
	struct pci_region mem64;
};

DECLARE_GLOBAL_DATA_PTR;

/**
 * pci_synquacer_ecam_conf_address() - Calculate the address of a config access
 * @bus: Pointer to the PCI bus
 * @bdf: Identifies the PCIe device to access
 * @offset: The offset into the device's configuration space
 * @paddress: Pointer to the pointer to write the calculated address to
 *
 * Calculates the address that should be accessed to perform a PCIe
 * configuration space access for a given device identified by the PCIe
 * controller device @pcie and the bus, device & function numbers in @bdf. If
 * access to the device is not valid then the function will return an error
 * code. Otherwise the address to access will be written to the pointer pointed
 * to by @paddress.
 */
static int pci_synquacer_ecam_conf_address(const struct udevice *bus,
					   pci_dev_t bdf, uint offset,
					   void **paddress)
{
	struct synquacer_ecam_pcie *pcie = dev_get_priv(bus);
	void *addr;

	addr = pcie->cfg_base;
	addr += (PCI_BUS(bdf) - pcie->first_busno) << 20;
	addr += PCI_DEV(bdf) << 15;
	addr += PCI_FUNC(bdf) << 12;
	addr += offset;
	*paddress = addr;

	return 0;
}
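
/*
 * Worked example (illustration only): with first_busno == 0 and
 * cfg_base == 0x60000000, a config read at offset 0x10 of bus 1,
 * device 0, function 0 decodes to
 *
 *	0x60000000 + (1 << 20) + (0 << 15) + (0 << 12) + 0x10 = 0x60100010
 *
 * i.e. the usual ECAM layout of 1 MiB per bus, 32 KiB per device and
 * 4 KiB per function.
 */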

static bool pci_synquacer_ecam_addr_valid(const struct udevice *bus,
					  pci_dev_t bdf)
{
	struct synquacer_ecam_pcie *pcie = dev_get_priv(bus);
	int num_buses = DIV_ROUND_UP(pcie->size, 1 << 16);

	/*
	 * The Synopsys DesignWare PCIe controller in ECAM mode will not filter
	 * type 0 config TLPs sent to devices 1 and up on its downstream port,
	 * resulting in devices appearing multiple times on bus 0 unless we
	 * filter out those accesses here.
	 */
	if (PCI_BUS(bdf) == pcie->first_busno && PCI_DEV(bdf) > 0)
		return false;

	return (PCI_BUS(bdf) >= pcie->first_busno &&
		PCI_BUS(bdf) < pcie->first_busno + num_buses);
}
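
/*
 * For example, with first_busno == 0 a config access to 00:01.0 is
 * rejected here (the read path below then returns all-ones), while
 * 00:00.0 (the root port itself) and devices on the secondary buses
 * within the ECAM window are passed through.
 */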

/**
 * pci_synquacer_ecam_read_config() - Read from configuration space
 * @bus: Pointer to the PCI bus
 * @bdf: Identifies the PCIe device to access
 * @offset: The offset into the device's configuration space
 * @valuep: A pointer at which to store the read value
 * @size: Indicates the size of access to perform
 *
 * Read a value of size @size from offset @offset within the configuration
 * space of the device identified by the bus, device & function numbers in @bdf
 * on the PCI bus @bus.
 */
static int pci_synquacer_ecam_read_config(const struct udevice *bus,
					  pci_dev_t bdf, uint offset,
					  ulong *valuep, enum pci_size_t size)
{
	if (!pci_synquacer_ecam_addr_valid(bus, bdf)) {
		*valuep = pci_get_ff(size);
		return 0;
	}

	return pci_generic_mmap_read_config(bus, pci_synquacer_ecam_conf_address,
					    bdf, offset, valuep, size);
}

/**
 * pci_synquacer_ecam_write_config() - Write to configuration space
 * @bus: Pointer to the PCI bus
 * @bdf: Identifies the PCIe device to access
 * @offset: The offset into the device's configuration space
 * @value: The value to write
 * @size: Indicates the size of access to perform
 *
 * Write the value @value of size @size from offset @offset within the
 * configuration space of the device identified by the bus, device & function
 * numbers in @bdf on the PCI bus @bus.
 */
static int pci_synquacer_ecam_write_config(struct udevice *bus, pci_dev_t bdf,
					   uint offset, ulong value,
					   enum pci_size_t size)
{
	if (!pci_synquacer_ecam_addr_valid(bus, bdf))
		return 0;

	return pci_generic_mmap_write_config(bus, pci_synquacer_ecam_conf_address,
					     bdf, offset, value, size);
}

/**
 * pci_synquacer_ecam_of_to_plat() - Translate from DT to device state
 * @dev: A pointer to the device being operated on
 *
 * Translate relevant data from the device tree pertaining to device @dev into
 * state that the driver will later make use of. This state is stored in the
 * device's private data structure.
 *
 * Return: 0 on success, else a negative error code
 */
static int pci_synquacer_ecam_of_to_plat(struct udevice *dev)
{
	struct synquacer_ecam_pcie *pcie = dev_get_priv(dev);
	struct fdt_resource reg_res;
	int i, err;

	debug("%s: called for %s\n", __func__, dev->name);

	err = fdt_get_resource(gd->fdt_blob, dev_of_offset(dev), "reg",
			       0, &reg_res);
	if (err < 0) {
		pr_err("\"reg\" resource not found\n");
		return err;
	}

	/* Find the matching pair of DBI/EXS base addresses */
	for (i = 0; i < NUM_SQ_PCI_RC; i++) {
		if (synquacer_pci_bases[i].cfg_base == reg_res.start)
			break;
	}
	if (i == NUM_SQ_PCI_RC) {
		pr_err("Unknown ECAM base address %lx.\n",
		       (unsigned long)reg_res.start);
		return -ENOENT;
	}
	pcie->dbi_base = map_physmem(synquacer_pci_bases[i].dbi_base,
				     SYNQUACER_PCI_DBI_SIZE, MAP_NOCACHE);
	if (!pcie->dbi_base) {
		pr_err("Failed to map DBI for %s\n", dev->name);
		return -ENOMEM;
	}

	pcie->exs_base = map_physmem(synquacer_pci_bases[i].exs_base,
				     SYNQUACER_PCI_EXS_SIZE, MAP_NOCACHE);
	if (!pcie->exs_base) {
		pr_err("Failed to map EXS for %s\n", dev->name);
		return -ENOMEM;
	}

	pcie->size = fdt_resource_size(&reg_res);
	pcie->cfg_base = map_physmem(reg_res.start, pcie->size, MAP_NOCACHE);
	if (!pcie->cfg_base) {
		pr_err("Failed to map config space for %s\n", dev->name);
		return -ENOMEM;
	}
	debug("mappings DBI: %p EXS: %p CFG: %p\n",
	      pcie->dbi_base, pcie->exs_base, pcie->cfg_base);

	return 0;
}

static void pci_synquacer_pre_init(struct synquacer_ecam_pcie *pcie)
{
	void *base = pcie->exs_base;

	masked_writel(base, EM_SELECT, PRE_DET_STT_SEL, 0);
	masked_writel(base, EM_CONTROL, PRE_DET_STT_REG, 0);
	masked_writel(base, EM_CONTROL, PRE_DET_STT_REG, 1);

	/* 1: Assert all PHY / LINK resets */
	masked_writel(base, RESET_SELECT_1, PERST_SEL, 0);
	masked_writel(base, RESET_CONTROL_1, PERST_N_I_REG, 0);
	masked_writel(base, RESET_CONTROL_1, PERST_N_O_REG, 0);

	/* Device reset (PERST#) takes effect after device_type (RC) is set */
	masked_writel(base, RESET_SELECT_1, PWUP_RST_SEL, 0);
	masked_writel(base, RESET_CONTROL_1, PWUP_RST_N_REG, 0);
	masked_writel(base, RESET_SELECT_1, BUTTON_RST_SEL, 0);
	masked_writel(base, RESET_CONTROL_1, BUTTON_RST_N_REG, 0);
	masked_writel(base, RESET_SELECT_1, PWR_RST_SEL, 1);
	masked_writel(base, RESET_SELECT_2, MSTR_ARST_SEL, 1);
	masked_writel(base, RESET_SELECT_2, SLV_ARST_SEL, 1);
	masked_writel(base, RESET_SELECT_2, DBI_ARST_SEL, 1);
	masked_writel(base, RESET_SELECT_1, CORE_RST_SEL, 1);
	masked_writel(base, RESET_SELECT_1, STI_RST_SEL, 1);
	masked_writel(base, RESET_SELECT_1, N_STI_RST_SEL, 1);
	masked_writel(base, RESET_SELECT_1, SQU_RST_SEL, 1);
	masked_writel(base, RESET_SELECT_1, PHY_RST_SEL, 1);

	/* 2: Set P<n>_app_ltssm_enable='0' for reprogramming before linkup. */
	masked_writel(base, CORE_CONTROL, APP_LTSSM_ENABLE, 0);

	/* 3: Set device_type (RC) */
	masked_writel(base, CORE_CONTROL, DEVICE_TYPE, 4);
}

static void pci_synquacer_dbi_init(void *dbi_base)
{
	masked_writel(dbi_base, MISC_CONTROL_1_OFF, DBI_RO_WR_EN, 1);
	/* 4 Lanes */
	masked_writel(dbi_base, LINK_CAPABILITIES_REG,
		      PCIE_CAP_MAX_LINK_WIDTH, 4);
	/* Gen 2 */
	masked_writel(dbi_base, LINK_CAPABILITIES_REG,
		      PCIE_CAP_MAX_LINK_SPEED, 2);

	/* Advertise the standard PCI-to-PCI bridge class code (0x060400) */
	masked_writel(dbi_base, TYPE1_CLASS_CODE_REV_ID_REG,
		      BASE_CLASS_CODE, BASE_CLASS_CODE_VALUE);
	masked_writel(dbi_base, TYPE1_CLASS_CODE_REV_ID_REG,
		      SUBCLASS_CODE, SUBCLASS_CODE_VALUE);
	masked_writel(dbi_base, TYPE1_CLASS_CODE_REV_ID_REG,
		      PROGRAM_INTERFACE, PROGRAM_INTERFACE_VALUE);

	masked_writel(dbi_base, MISC_CONTROL_1_OFF, DBI_RO_WR_EN, 0);
}

static void pcie_sq_prog_outbound_atu(void *dbi_base, int index,
				      u64 cpu_base, u64 pci_base, u64 size,
				      u32 type, u32 flags)
{
	debug("%s: %p, %d, %llx, %llx, %llx, %x, %x\n", __func__,
	      dbi_base, index, cpu_base, pci_base, size, type, flags);

	writel(IATU_VIEWPORT_OUTBOUND | IATU_VIEWPORT_REGION_INDEX(index),
	       dbi_base + IATU_VIEWPORT_OFF);

	writel((u32)(cpu_base & 0xffffffff),
	       dbi_base + IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0);
	writel((u32)(cpu_base >> 32),
	       dbi_base + IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0);
	writel((u32)(cpu_base + size - 1),
	       dbi_base + IATU_LIMIT_ADDR_OFF_OUTBOUND_0);

	writel((u32)(pci_base & 0xffffffff),
	       dbi_base + IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0);
	writel((u32)(pci_base >> 32),
	       dbi_base + IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0);

	writel(type, dbi_base + IATU_REGION_CTRL_1_OFF_OUTBOUND_0);
	writel(IATU_REGION_CTRL_2_OFF_OUTBOUND_0_REGION_EN | flags,
	       dbi_base + IATU_REGION_CTRL_2_OFF_OUTBOUND_0);
}
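
/*
 * Example (mirrors the calls made in pci_synquacer_post_init() below):
 * a call such as
 *
 *	pcie_sq_prog_outbound_atu(dbi_base, 3, io.phys_start, io.bus_start,
 *				  io.size,
 *				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_IO, 0);
 *
 * programs outbound region 3 so that CPU accesses in
 * [io.phys_start, io.phys_start + io.size - 1] are forwarded as PCI I/O
 * transactions starting at bus address io.bus_start.
 */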

static void pci_synquacer_post_init(struct synquacer_ecam_pcie *pcie)
{
	void *base = pcie->exs_base;

	/*
	 * 4: Set bifurcation (1=disable, 4=enable)
	 * 5: Supply reference clock (already done)
	 * 6: Wait 10 usec (until the reference clock is stable)
	 * 7: De-assert PERST#
	 */
	masked_writel(base, RESET_CONTROL_1, PERST_N_I_REG, 1);
	masked_writel(base, RESET_CONTROL_1, PERST_N_O_REG, 1);

	/* 8: Assert SYS_AUX_PWR_DET */
	masked_writel(base, PM_CONTROL_2, SYS_AUX_PWR_DET, 1);

	/* 9: Supply following clocks */
	masked_writel(base, AXI_CLK_STOP, MSTR_CSYSREQ_REG, 1);
	masked_writel(base, AXI_CLK_STOP, MSTR_ACLK_STOP, 0);
	masked_writel(base, AXI_CLK_STOP, SLV_CSYSREQ_REG, 1);
	masked_writel(base, AXI_CLK_STOP, SLV_ACLK_STOP, 0);
	masked_writel(base, AXI_CLK_STOP, DBI_CSYSREQ_REG, 1);
	masked_writel(base, AXI_CLK_STOP, DBI_ACLK_STOP, 0);

	/*
	 * 10: De-assert PHY reset
	 * 11: De-assert LINK's PMC reset
	 */
	masked_writel(base, RESET_CONTROL_1, PWUP_RST_N_REG, 1);
	masked_writel(base, RESET_CONTROL_1, BUTTON_RST_N_REG, 1);

	/*
	 * 12: PHY auto
	 * 13: Wrapper auto
	 * 14-17: PHY auto
	 * 18: Wrapper auto
	 * 19: Update registers through DBI AXI Slave interface
	 */
	pci_synquacer_dbi_init(pcie->dbi_base);

	or_writel(pcie->dbi_base, PCI_COMMAND,
		  PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

	/* Force link speed change to Gen2 at link up */
	or_writel(pcie->dbi_base, GEN2_CONTROL_OFF, DIRECT_SPEED_CHANGE);

	/* Region 0: MMIO32 range */
	pcie_sq_prog_outbound_atu(pcie->dbi_base, 0,
				  pcie->mem.phys_start,
				  pcie->mem.bus_start,
				  pcie->mem.size,
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_MEM |
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TH,
				  IATU_REGION_CTRL_2_OFF_OUTBOUND_0_MSG_CODE_32BIT);

	/* Region 1: Type 0 config space */
	pcie_sq_prog_outbound_atu(pcie->dbi_base, 1,
				  (u64)pcie->cfg_base,
				  0,
				  SIZE_64KB,
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_CFG0,
				  IATU_REGION_CTRL_2_OFF_OUTBOUND_0_CFG_SHIFT_MODE);

	/* Region 2: Type 1 config space */
	pcie_sq_prog_outbound_atu(pcie->dbi_base, 2,
				  (u64)pcie->cfg_base + SIZE_64KB,
				  0,
				  (u64)pcie->io.phys_start - (u64)pcie->cfg_base - SIZE_64KB,
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_CFG1,
				  IATU_REGION_CTRL_2_OFF_OUTBOUND_0_CFG_SHIFT_MODE);

	/* Region 3: port I/O range */
	pcie_sq_prog_outbound_atu(pcie->dbi_base, 3,
				  pcie->io.phys_start,
				  pcie->io.bus_start,
				  pcie->io.size,
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_IO,
				  0);

	/* Region 4: MMIO64 range */
	pcie_sq_prog_outbound_atu(pcie->dbi_base, 4,
				  pcie->mem64.phys_start,
				  pcie->mem64.bus_start,
				  pcie->mem64.size,
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_MEM |
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TH,
				  IATU_REGION_CTRL_2_OFF_OUTBOUND_0_MSG_CODE_32BIT);

	/* Enable link */
	if (masked_readl(base, CORE_CONTROL, APP_LTSSM_ENABLE) == 0)
		masked_writel(base, CORE_CONTROL, APP_LTSSM_ENABLE, 1);
}

static int pci_synquacer_ecam_probe(struct udevice *dev)
{
	struct synquacer_ecam_pcie *pcie = dev_get_priv(dev);
	struct udevice *ctlr = pci_get_controller(dev);
	struct pci_controller *hose = dev_get_uclass_priv(ctlr);

	debug("Probe synquacer pcie for bus %d\n", dev_seq(dev));
	pcie->first_busno = dev_seq(dev);

	/* Store the IO and MEM window settings for configuring the ATU */
	pcie->io.phys_start = hose->regions[0].phys_start; /* IO base */
	pcie->io.bus_start = hose->regions[0].bus_start; /* IO_bus_addr */
	pcie->io.size = hose->regions[0].size; /* IO size */

	pcie->mem.phys_start = hose->regions[1].phys_start; /* MEM base */
	pcie->mem.bus_start = hose->regions[1].bus_start; /* MEM_bus_addr */
	pcie->mem.size = hose->regions[1].size; /* MEM size */

	pcie->mem64.phys_start = hose->regions[2].phys_start; /* MEM64 base */
	pcie->mem64.bus_start = hose->regions[2].bus_start; /* MEM64_bus_addr */
	pcie->mem64.size = hose->regions[2].size; /* MEM64 size */

	pci_synquacer_pre_init(pcie);

	mdelay(150);

	pci_synquacer_post_init(pcie);

	/* Give the PCIe bus some time to stabilize before scanning */
	mdelay(100);

	return 0;
}

static const struct dm_pci_ops pci_synquacer_ecam_ops = {
	.read_config = pci_synquacer_ecam_read_config,
	.write_config = pci_synquacer_ecam_write_config,
};

static const struct udevice_id pci_synquacer_ecam_ids[] = {
	{ .compatible = "socionext,synquacer-pcie-ecam" },
	{ }
};

U_BOOT_DRIVER(pci_synquacer_ecam) = {
	.name = "pci_synquacer_ecam",
	.id = UCLASS_PCI,
	.of_match = pci_synquacer_ecam_ids,
	.ops = &pci_synquacer_ecam_ops,
	.probe = pci_synquacer_ecam_probe,
	.of_to_plat = pci_synquacer_ecam_of_to_plat,
	.priv_auto = sizeof(struct synquacer_ecam_pcie),
};