/*
 * Copyright (c) 2016 - 2020, Broadcom
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <errno.h>
#include <stdbool.h>

#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>

#include <paxb.h>
#include <sr_def.h>
#include <sr_utils.h>

#define PCIE_CORE_PWR_ARR_POWERON	0x8
#define PCIE_CORE_PWR_ARR_POWEROK	0x4
#define PCIE_CORE_PWR_POWERON		0x2
#define PCIE_CORE_PWR_POWEROK		0x1

#define PCIE_CORE_USER_CFG		(PCIE_CORE_BASE + 0x38)
#define PCIE_PAXB_SMMU_SID_CFG		(PCIE_CORE_BASE + 0x60)
#ifdef SID_B8_D1_F1
#define PAXB_SMMU_SID_CFG_BUS_WIDTH	(0x8 << 8)
#define PAXB_SMMU_SID_CFG_DEV_WIDTH	(0x1 << 12)
#define PAXB_SMMU_SID_CFG_FUN_WIDTH	(0x1 << 16)
#else
#define PAXB_SMMU_SID_CFG_BUS_WIDTH	(0x2 << 8)
#define PAXB_SMMU_SID_CFG_DEV_WIDTH	(0x5 << 12)
#define PAXB_SMMU_SID_CFG_FUN_WIDTH	(0x3 << 16)
#endif

#define PAXB_APB_TIMEOUT_COUNT_OFFSET	0x034

/* allow up to 5 ms for each power switch to stabilize */
#define PCIE_CORE_PWR_TIMEOUT_MS	5

/* wait 1 microsecond for PCIe core soft reset */
#define PCIE_CORE_SOFT_RST_DELAY_US	1

/*
 * List of PAXB APB registers
 */
#define PAXB_BASE			0x48000000
#define PAXB_BASE_OFFSET		0x4000
#define PAXB_OFFSET(core)		(PAXB_BASE + \
					 (core) * PAXB_BASE_OFFSET)

#define PAXB_CLK_CTRL_OFFSET		0x000
#define PAXB_EP_PERST_SRC_SEL_MASK	(1 << 2)
#define PAXB_EP_MODE_PERST_MASK		(1 << 1)
#define PAXB_RC_PCIE_RST_OUT_MASK	(1 << 0)

#define PAXB_MAX_IMAP_WINDOWS		8
#define PAXB_IMAP_REG_WIDTH		8
#define PAXB_IMAP0_REG_WIDTH		4
#define PAXB_AXUSER_REG_WIDTH		4

#define PAXB_CFG_IND_ADDR_OFFSET	0x120
#define PAXB_CFG_IND_DATA_OFFSET	0x124
#define PAXB_CFG_IND_ADDR_MASK		0x1ffc
#define PAXB_CFG_CFG_TYPE_MASK		0x1

#define PAXB_EP_CFG_ADDR_OFFSET		0x1f8
#define PAXB_EP_CFG_DATA_OFFSET		0x1fc
#define PAXB_EP_CFG_ADDR_MASK		0xffc
#define PAXB_EP_CFG_TYPE_MASK		0x1

#define PAXB_0_DEFAULT_IMAP		0xed0
#define DEFAULT_ADDR_INVALID		BIT(0)
#define PAXB_0_DEFAULT_IMAP_AXUSER	0xed8
#define PAXB_0_DEFAULT_IMAP_AXCACHE	0xedc
#define IMAP_AXCACHE			0xff
#define OARR_VALID			BIT(0)
#define IMAP_VALID			BIT(0)

#define PAXB_IMAP0_BASE_OFFSET		0xc00
#define PAXB_IARR0_BASE_OFFSET		0xd00
#define PAXB_IMAP0_OFFSET(idx)		(PAXB_IMAP0_BASE_OFFSET + \
					 (idx) * PAXB_IMAP0_REG_WIDTH)
#define PAXB_IMAP0_WINDOW_SIZE		0x1000

#define PAXB_IMAP2_OFFSET		0xcc0
#define PAXB_IMAP0_REGS_TYPE_OFFSET	0xcd0
#define PAXB_IARR2_LOWER_OFFSET		0xd10

#define PAXB_IMAP3_BASE_OFFSET		0xe08
#define PAXB_IMAP3_OFFSET(idx)		(PAXB_IMAP3_BASE_OFFSET + \
					 (idx) * PAXB_IMAP_REG_WIDTH)

#define PAXB_IMAP3_0_AXUSER_B_OFFSET	0xe48
#define PAXB_IMAP3_0_AXUSER_OFFSET(idx)	(PAXB_IMAP3_0_AXUSER_B_OFFSET + \
					 (idx) * PAXB_AXUSER_REG_WIDTH)

#define PAXB_IMAP4_BASE_OFFSET		0xe70
#define PAXB_IMAP4_OFFSET(idx)		(PAXB_IMAP4_BASE_OFFSET + \
					 (idx) * PAXB_IMAP_REG_WIDTH)

#define PAXB_IMAP4_0_AXUSER_B_OFFSET	0xeb0
#define PAXB_IMAP4_0_AXUSER_OFFSET(idx)	(PAXB_IMAP4_0_AXUSER_B_OFFSET + \
					 (idx) * PAXB_AXUSER_REG_WIDTH)

#define PAXB_CFG_LINK_STATUS_OFFSET	0xf0c
#define PAXB_CFG_PHYLINKUP_MASK		(1 << 3)
#define PAXB_CFG_DL_ACTIVE_MASK		(1 << 2)

#define PAXB_IMAP0_0_AXUSER_OFFSET	0xf60
#define PAXB_IMAP2_AXUSER_OFFSET	0xfe0

/* cacheable write-back, allocate on both reads and writes */
#define IMAP_ARCACHE			0x0f0
#define IMAP_AWCACHE			0xf00
/*
 * normal access, non-secure access, and data access
 * AWQOS: 0xe and ARQOS: 0xa
 * AWPROT: 0x2 and ARPROT: 0x1
 */
#define IMAP_AXUSER			0x002e002a

/*
 * List of NIC security and PIPEMUX related registers
 */
#define SR_PCIE_NIC_SECURITY_BASE	0x58100000
#define NS3Z_PCIE_NIC_SECURITY_BASE	0x48100000

#define GITS_TRANSLATER			0x63c30000

#define VENDOR_ID			0x14e4
#define CFG_RC_DEV_ID			0x434
#define CFG_RC_DEV_SUBID		0x438
#define PCI_BRIDGE_CTRL_REG_OFFSET	0x43c
#define PCI_CLASS_BRIDGE_MASK		0xffff00
#define PCI_CLASS_BRIDGE_SHIFT		8
#define PCI_CLASS_BRIDGE_PCI		0x0604

/*
 * List of PAXB RC configuration space registers
 */

/* first capability list entry */
#define PCI_CAPABILITY_LIST_OFFSET	0x34
#define PCI_CAPABILITY_SPEED_OFFSET	0xc
#define PCI_EP_CAPABILITY_OFFSET	0x10

#define CFG_RC_LINK_STATUS_CTRL_2	0x0dc
#define CFG_RC_LINK_SPEED_SHIFT		0
#define CFG_RC_LINK_SPEED_MASK		(0xf << CFG_RC_LINK_SPEED_SHIFT)

#define CFG_RC_DEVICE_CAP		0x4d4
#define CFG_RC_DEVICE_CAP_MPS_SHIFT	0
#define CFG_RC_DEVICE_CAP_MPS_MASK	(0x7 << CFG_RC_DEVICE_CAP_MPS_SHIFT)
/* MPS 256 bytes */
#define CFG_RC_DEVICE_CAP_MPS_256B	(0x1 << CFG_RC_DEVICE_CAP_MPS_SHIFT)
/* MPS 512 bytes */
#define CFG_RC_DEVICE_CAP_MPS_512B	(0x2 << CFG_RC_DEVICE_CAP_MPS_SHIFT)

#define CFG_RC_TL_FCIMM_NP_LIMIT	0xa10
#define CFG_RC_TL_FCIMM_NP_VAL		0x01500000
#define CFG_RC_TL_FCIMM_P_LIMIT		0xa14
#define CFG_RC_TL_FCIMM_P_VAL		0x03408080

#define CFG_RC_LINK_CAP			0x4dc
#define CFG_RC_LINK_CAP_SPEED_SHIFT	0
#define CFG_RC_LINK_CAP_SPEED_MASK	(0xf << CFG_RC_LINK_CAP_SPEED_SHIFT)
#define CFG_RC_LINK_CAP_WIDTH_SHIFT	4
#define CFG_RC_LINK_CAP_WIDTH_MASK	(0x1f << CFG_RC_LINK_CAP_WIDTH_SHIFT)

#define CFG_LINK_CAP_RC			0x4f0
#define CFG_RC_DL_ACTIVE_SHIFT		0
#define CFG_RC_DL_ACTIVE_MASK		(0x1 << CFG_RC_DL_ACTIVE_SHIFT)
#define CFG_RC_SLOT_CLK_SHIFT		1
#define CFG_RC_SLOT_CLK_MASK		(0x1 << CFG_RC_SLOT_CLK_SHIFT)

#define CFG_ROOT_CAP_RC			0x4f8
#define CFG_ROOT_CAP_LTR_SHIFT		1
#define CFG_ROOT_CAP_LTR_MASK		(0x1 << CFG_ROOT_CAP_LTR_SHIFT)

#define CFG_RC_CLKREQ_ENABLED		0x4fc
#define CFG_RC_CLKREQ_ENABLED_SHIFT	0
#define CFG_RC_CLKREQ_ENABLED_MASK	(0x1 << CFG_RC_CLKREQ_ENABLED_SHIFT)

#define CFG_RC_COEFF_ADDR		0x638

#define CFG_RC_TL_CTRL_0		0x800
#define RC_MEM_DW_CHK_MASK		0x03fe

#define CFG_RC_PDL_CTRL_4		0x1010
#define NPH_FC_INIT_SHIFT		24
#define NPH_FC_INIT_MASK		(U(0xff) << NPH_FC_INIT_SHIFT)
#define PD_FC_INIT_SHIFT		12
#define PD_FC_INIT_MASK			(0xffff << PD_FC_INIT_SHIFT)

#define CFG_RC_PDL_CTRL_5		0x1014
#define PH_INIT_SHIFT			0
#define PH_INIT_MASK			(0xff << PH_INIT_SHIFT)

#define DL_STATUS_OFFSET		0x1048
#define PHYLINKUP			BIT(13)

#define PH_INIT				0x10
#define PD_FC_INIT			0x100
#define NPH_FC_INIT			0x8

#define SRP_PH_INIT			0x7F
#define SRP_PD_FC_INIT			0x200
#define SRP_NPH_FC_INIT			0x7F

#define CFG_ADDR_BUS_NUM_SHIFT		20
#define CFG_ADDR_DEV_NUM_SHIFT		15
#define CFG_ADDR_FUNC_NUM_SHIFT		12
#define CFG_ADDR_REG_NUM_SHIFT		2
#define CFG_ADDR_REG_NUM_MASK		0x00000ffc
#define CFG_ADDR_CFG_TYPE_MASK		0x00000003

#define DL_LINK_UP_TIMEOUT_MS		1000

#define CFG_RETRY_STATUS		0xffff0001
#define CRS_TIMEOUT_MS			5000

/* create EP config data to write */
#define DEF_BUS_NO			1 /* default bus 1 */
#define DEF_SLOT_NO			0 /* default slot 0 */
#define DEF_FN_NO			0 /* default fn 0 */

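/*
 * Compose a type-1 configuration address from bus/slot/function/offset;
 * e.g., EP_CONFIG_VAL(1, 0, 0, 0x10) addresses config offset 0x10 of
 * bus 1, device 0, function 0.
 */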
#define EP_CONFIG_VAL(bus_no, slot, fn, where) \
	(((bus_no) << CFG_ADDR_BUS_NUM_SHIFT) | \
	 ((slot) << CFG_ADDR_DEV_NUM_SHIFT) | \
	 ((fn) << CFG_ADDR_FUNC_NUM_SHIFT) | \
	 ((where) & CFG_ADDR_REG_NUM_MASK) | \
	 (1 & CFG_ADDR_CFG_TYPE_MASK))

/* PAXB security offset */
#define PAXB_SECURITY_IDM_OFFSET	0x1c
#define PAXB_SECURITY_APB_OFFSET	0x24
#define PAXB_SECURITY_ECAM_OFFSET	0x3c

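/*
 * Expands to the platform-specific configuration getter; e.g.,
 * paxb_get_config(sr) becomes paxb_get_sr_config().
 */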
#define paxb_get_config(type)	paxb_get_##type##_config()

static unsigned int paxb_sec_reg_offset[] = {
	0x0c, /* PAXB0 AXI */
	0x10, /* PAXB1 AXI */
	0x14, /* PAXB2 AXI */
	0x18, /* PAXB3 AXI */
	0x20, /* PAXB4 AXI */
	0x28, /* PAXB5 AXI */
	0x2c, /* PAXB6 AXI */
	0x30, /* PAXB7 AXI */
	0x24, /* PAXB APB */
};

const paxb_cfg *paxb;

/*
 * Given a PIPEMUX strap and PCIe core index, this function returns 1 if a
 * PCIe core needs to be enabled
 */
int pcie_core_needs_enable(unsigned int core_idx)
{
	if (paxb->core_needs_enable)
		return paxb->core_needs_enable(core_idx);

	return 0;
}

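/*
 * Walk the RC TX coefficient registers (one 32-bit register per two
 * lanes) and set the low nibble of each byte to the default value 7.
 */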
static void pcie_set_default_tx_coeff(uint32_t core_idx, uint32_t link_width)
{
	unsigned int lanes = 0;
	uint32_t data, addr;

	addr = CFG_RC_COEFF_ADDR;
	for (lanes = 0; lanes < link_width; lanes = lanes + 2) {
		data = paxb_rc_cfg_read(core_idx, addr);
		data &= 0xf0f0f0f0;
		data |= (7 & 0xf);
		data |= (7 & 0xf) << 8;
		data |= (7 & 0xf) << 16;
		data |= (7 & 0xf) << 24;

		paxb_rc_cfg_write(core_idx, addr, data);
		addr += 4;
	}
}

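/*
 * Advertise the link width and speed derived from the PIPEMUX strap
 * through each enabled RC's link capability register, and mirror the
 * speed into its link status/control 2 register.
 */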
static int paxb_rc_link_init(void)
{
	uint32_t val, link_speed;
	unsigned int link_width;
	uint32_t core_idx;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		link_width = paxb->get_link_width(core_idx);
		if (!link_width) {
			ERROR("Unsupported PIPEMUX\n");
			return -EOPNOTSUPP;
		}

		link_speed = paxb->get_link_speed();
		/* program RC's link cap reg to advertise proper link width */
		val = paxb_rc_cfg_read(core_idx, CFG_RC_LINK_CAP);
		val &= ~CFG_RC_LINK_CAP_WIDTH_MASK;
		val |= (link_width << CFG_RC_LINK_CAP_WIDTH_SHIFT);
		paxb_rc_cfg_write(core_idx, CFG_RC_LINK_CAP, val);

		/* program RC's link cap reg to advertise proper link speed */
		val = paxb_rc_cfg_read(core_idx, CFG_RC_LINK_CAP);
		val &= ~CFG_RC_LINK_CAP_SPEED_MASK;
		val |= link_speed << CFG_RC_LINK_CAP_SPEED_SHIFT;
		paxb_rc_cfg_write(core_idx, CFG_RC_LINK_CAP, val);

		/* also need to program RC's link status control register */
		val = paxb_rc_cfg_read(core_idx, CFG_RC_LINK_STATUS_CTRL_2);
		val &= ~(CFG_RC_LINK_SPEED_MASK);
		val |= link_speed << CFG_RC_LINK_SPEED_SHIFT;
		paxb_rc_cfg_write(core_idx, CFG_RC_LINK_STATUS_CTRL_2, val);
#ifdef WAR_PLX_PRESET_PARITY_FAIL
		/*
		 * WAR to avoid a crash with the PLX switch in GEN3: while in
		 * PRESET, the PLX switch does not fix parity, so GEN3 parity
		 * matching is disabled
		 */
		val = paxb_rc_cfg_read(core_idx, CFG_RC_REG_PHY_CTL_10);
		val &= ~(PHY_CTL_10_GEN3_MATCH_PARITY);
		paxb_rc_cfg_write(core_idx, CFG_RC_REG_PHY_CTL_10, val);
#endif
		pcie_set_default_tx_coeff(core_idx, link_width);
	}
	return 0;
}

#ifdef PAXB_LINKUP
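/*
 * Assert or de-assert PERST# to the downstream device: assertion clears
 * the reset source/mode selects and holds reset for 250 us; de-assertion
 * drives the RC reset output and allows 100 ms for the device to settle.
 */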
static void paxb_perst_ctrl(unsigned int core_idx, bool assert)
{
	uint32_t clk_ctrl = PAXB_OFFSET(core_idx) + PAXB_CLK_CTRL_OFFSET;

	if (assert) {
		mmio_clrbits_32(clk_ctrl, PAXB_EP_PERST_SRC_SEL_MASK |
				PAXB_EP_MODE_PERST_MASK |
				PAXB_RC_PCIE_RST_OUT_MASK);
		udelay(250);
	} else {
		mmio_setbits_32(clk_ctrl, PAXB_RC_PCIE_RST_OUT_MASK);
		mdelay(100);
	}
}

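/*
 * Toggle PERST# on every enabled core, then poll the PAXB link status
 * register for up to DL_LINK_UP_TIMEOUT_MS for the data link layer to
 * report active.
 */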
static void paxb_start_link_up(void)
{
	unsigned int core_idx;
	uint32_t val, timeout;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		/* toggle PERST */
		paxb_perst_ctrl(core_idx, true);
		paxb_perst_ctrl(core_idx, false);

		timeout = DL_LINK_UP_TIMEOUT_MS;
		/* wait for Link up */
		do {
			val = mmio_read_32(PAXB_OFFSET(core_idx) +
					   PAXB_CFG_LINK_STATUS_OFFSET);
			if (val & PAXB_CFG_DL_ACTIVE_MASK)
				break;

			mdelay(1);
		} while (--timeout);

		if (!timeout)
			ERROR("PAXB core %u link is down\n", core_idx);
	}
}
#endif

static void pcie_core_soft_reset(unsigned int core_idx)
{
	uint32_t offset = core_idx * PCIE_CORE_PWR_OFFSET;
	uintptr_t ctrl = (uintptr_t)(PCIE_CORE_SOFT_RST_CFG_BASE + offset);

	/* Put PCIe core in soft reset */
	mmio_clrbits_32(ctrl, PCIE_CORE_SOFT_RST);

	/* Wait for 1 us before pulling PCIe core out of soft reset */
	udelay(PCIE_CORE_SOFT_RST_DELAY_US);

	mmio_setbits_32(ctrl, PCIE_CORE_SOFT_RST);
}

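/*
 * Turn on a single power switch stage and poll the status register until
 * the same mask reads back as set, or fail with -EIO after
 * PCIE_CORE_PWR_TIMEOUT_MS milliseconds.
 */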
static int pcie_core_pwron_switch(uintptr_t ctrl, uintptr_t status,
				  uint32_t mask)
{
	uint32_t val;
	unsigned int timeout = PCIE_CORE_PWR_TIMEOUT_MS;

	/* enable switch */
	mmio_setbits_32(ctrl, mask);

	/* now wait for it to stabilize */
	do {
		val = mmio_read_32(status);
		if ((val & mask) == mask)
			return 0;
		mdelay(1);
	} while (--timeout);

	return -EIO;
}

static int pcie_core_pwr_seq(uintptr_t ctrl, uintptr_t status)
{
	int ret;

	/*
	 * Enable the switch with the following sequence:
	 * 1. Array weak switch output switch
	 * 2. Array strong switch
	 * 3. Weak switch output acknowledge
	 * 4. Strong switch output acknowledge
	 */
	ret = pcie_core_pwron_switch(ctrl, status, PCIE_CORE_PWR_ARR_POWERON);
	if (ret)
		return ret;

	ret = pcie_core_pwron_switch(ctrl, status, PCIE_CORE_PWR_ARR_POWEROK);
	if (ret)
		return ret;

	ret = pcie_core_pwron_switch(ctrl, status, PCIE_CORE_PWR_POWERON);
	if (ret)
		return ret;

	ret = pcie_core_pwron_switch(ctrl, status, PCIE_CORE_PWR_POWEROK);
	if (ret)
		return ret;

	return 0;
}

/*
 * This function enables PCIe core and PAXB memory buffer power, and then
 * removes the PCIe core from isolation
 */
static int pcie_core_pwr_init(unsigned int core_idx)
{
	int ret;
	uint32_t offset = core_idx * PCIE_CORE_PWR_OFFSET;
	uintptr_t ctrl, status;

	/* enable mem power to PCIe core */
	ctrl = (uintptr_t)(PCIE_CORE_MEM_PWR_BASE + offset);
	status = (uintptr_t)(PCIE_CORE_MEM_PWR_STATUS_BASE + offset);
	ret = pcie_core_pwr_seq(ctrl, status);
	if (ret) {
		ERROR("PCIe core mem power failed\n");
		return ret;
	}

	/* now enable mem power to PAXB wrapper */
	ctrl = (uintptr_t)(PCIE_PAXB_MEM_PWR_BASE + offset);
	status = (uintptr_t)(PCIE_PAXB_MEM_PWR_STATUS_BASE + offset);
	ret = pcie_core_pwr_seq(ctrl, status);
	if (ret) {
		ERROR("PAXB mem power failed\n");
		return ret;
	}

	/* now remove power isolation */
	ctrl = (uintptr_t)(PCIE_CORE_ISO_CFG_BASE + offset);
	mmio_clrbits_32(ctrl, PCIE_CORE_ISO | PCIE_CORE_MEM_ISO);

	return 0;
}

static void pcie_ss_reset(void)
{
	mmio_setbits_32(CDRU_MISC_RESET_CONTROL,
			1 << CDRU_MISC_RESET_CONTROL__CDRU_PCIE_RESET_N_R);
}

/*
 * This function reads the PIPEMUX strap, figures out all the PCIe cores
 * that need to be enabled, and enables the mem power for those cores
 */
static int pcie_cores_init(void)
{
	int ret = 0;
	uint32_t core_idx;

	if (paxb->pipemux_init) {
		ret = paxb->pipemux_init();
		if (ret)
			return ret;
	}

	/* bring PCIe subsystem out of reset */
	pcie_ss_reset();

	/* power up all PCIe cores that will be used as RC */
	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		ret = pcie_core_pwr_init(core_idx);
		if (ret) {
			ERROR("PCIe core %u power up failed\n", core_idx);
			return ret;
		}

		pcie_core_soft_reset(core_idx);

		VERBOSE("PCIe core %u is powered up\n", core_idx);
	}

	return ret;
}

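/*
 * RC configuration space is reached indirectly: the register offset (with
 * the config type bit set) is written to the IND_ADDR register, then the
 * data is accessed through the IND_DATA register.
 */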
void paxb_rc_cfg_write(unsigned int core_idx, unsigned int where,
		       uint32_t val)
{
	mmio_write_32(PAXB_OFFSET(core_idx) + PAXB_CFG_IND_ADDR_OFFSET,
		      (where & PAXB_CFG_IND_ADDR_MASK) |
		      PAXB_CFG_CFG_TYPE_MASK);
	mmio_write_32(PAXB_OFFSET(core_idx) + PAXB_CFG_IND_DATA_OFFSET, val);
}

unsigned int paxb_rc_cfg_read(unsigned int core_idx, unsigned int where)
{
	unsigned int val;

	mmio_write_32(PAXB_OFFSET(core_idx) + PAXB_CFG_IND_ADDR_OFFSET,
		      (where & PAXB_CFG_IND_ADDR_MASK) |
		      PAXB_CFG_CFG_TYPE_MASK);
	val = mmio_read_32(PAXB_OFFSET(core_idx) + PAXB_CFG_IND_DATA_OFFSET);

	return val;
}

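/*
 * Advertise Max Payload Size support in the RC device capability
 * register: 512 bytes on cores 0, 1, 6 and 7, 256 bytes on the rest.
 */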
static void paxb_cfg_mps(void)
{
	uint32_t val, core_idx, mps;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		val = paxb_rc_cfg_read(core_idx, CFG_RC_DEVICE_CAP);
		val &= ~CFG_RC_DEVICE_CAP_MPS_MASK;
		mps = CFG_RC_DEVICE_CAP_MPS_256B;
		if (core_idx == 0 || core_idx == 1 ||
		    core_idx == 6 || core_idx == 7) {
			mps = CFG_RC_DEVICE_CAP_MPS_512B;
		}
		val |= mps;
		paxb_rc_cfg_write(core_idx, CFG_RC_DEVICE_CAP, val);
	}
}

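/*
 * Put each enabled core into RC mode, force the class code to
 * PCI-to-PCI bridge, and program the vendor/device and subsystem IDs.
 */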
static void paxb_cfg_dev_id(void)
{
	uint32_t val, core_idx;
	uint32_t device_id;

	device_id = paxb->device_id;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		/* Set Core in RC mode */
		mmio_setbits_32(PCIE_CORE_USER_CFG +
				(core_idx * PCIE_CORE_PWR_OFFSET), 1);

		/* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */
		val = paxb_rc_cfg_read(core_idx, PCI_BRIDGE_CTRL_REG_OFFSET);
		val &= ~PCI_CLASS_BRIDGE_MASK;
		val |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT);
		paxb_rc_cfg_write(core_idx, PCI_BRIDGE_CTRL_REG_OFFSET, val);

		val = (VENDOR_ID << 16) | device_id;
		paxb_rc_cfg_write(core_idx, CFG_RC_DEV_ID, val);

		val = (device_id << 16) | VENDOR_ID;
		paxb_rc_cfg_write(core_idx, CFG_RC_DEV_SUBID, val);
	}
}

static void paxb_cfg_tgt_trn(void)
{
	uint32_t val, core_idx;

	/*
	 * Disable all mem Rd/Wr size checks so that target read/write
	 * transactions may exceed the stipulated DW count. As a result, the
	 * PAXB root complex will not abort these read/write transactions
	 * beyond the stipulated limit
	 */
	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		val = paxb_rc_cfg_read(core_idx, CFG_RC_TL_CTRL_0);
		val &= ~(RC_MEM_DW_CHK_MASK);
		paxb_rc_cfg_write(core_idx, CFG_RC_TL_CTRL_0, val);
	}
}

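/*
 * Program the initial flow-control credits for posted/non-posted headers
 * and posted data; cores 0, 1, 6 and 7 take the larger SRP_* values.
 */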
static void paxb_cfg_pdl_ctrl(void)
{
	uint32_t val, core_idx;
	uint32_t nph, ph, pd;

	/* increase the credit counter to 4 for non-posted header */
	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		nph = NPH_FC_INIT;
		ph = PH_INIT;
		pd = PD_FC_INIT;

		if (core_idx == 0 || core_idx == 1 ||
		    core_idx == 6 || core_idx == 7) {
			nph = SRP_NPH_FC_INIT;
			ph = SRP_PH_INIT;
			pd = SRP_PD_FC_INIT;
		}
		val = paxb_rc_cfg_read(core_idx, CFG_RC_PDL_CTRL_4);
		val &= ~NPH_FC_INIT_MASK;
		val &= ~PD_FC_INIT_MASK;
		val = val | (nph << NPH_FC_INIT_SHIFT);
		val = val | (pd << PD_FC_INIT_SHIFT);
		paxb_rc_cfg_write(core_idx, CFG_RC_PDL_CTRL_4, val);

		val = paxb_rc_cfg_read(core_idx, CFG_RC_PDL_CTRL_5);
		val &= ~PH_INIT_MASK;
		val = val | (ph << PH_INIT_SHIFT);
		paxb_rc_cfg_write(core_idx, CFG_RC_PDL_CTRL_5, val);

		/*
		 * The ASIC team is to provide a more optimized value after
		 * further investigation; until then, this value is needed to
		 * get similar performance on all the slots
		 */
		paxb_rc_cfg_write(core_idx, CFG_RC_TL_FCIMM_NP_LIMIT,
				  CFG_RC_TL_FCIMM_NP_VAL);

		paxb_rc_cfg_write(core_idx, CFG_RC_TL_FCIMM_P_LIMIT,
				  CFG_RC_TL_FCIMM_P_VAL);
	}
}

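/* Clear the CLKREQ# enable bit on every enabled core */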
static void paxb_cfg_clkreq(void)
{
	uint32_t val, core_idx;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		val = paxb_rc_cfg_read(core_idx, CFG_RC_CLKREQ_ENABLED);
		val &= ~CFG_RC_CLKREQ_ENABLED_MASK;
		paxb_rc_cfg_write(core_idx, CFG_RC_CLKREQ_ENABLED, val);
	}
}

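/* Set or clear the data link layer active bit in the RC link capability */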
static void paxb_cfg_dl_active(bool enable)
{
	uint32_t val, core_idx;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		val = paxb_rc_cfg_read(core_idx, CFG_LINK_CAP_RC);
		if (enable)
			val |= CFG_RC_DL_ACTIVE_MASK;
		else
			val &= ~CFG_RC_DL_ACTIVE_MASK;
		paxb_rc_cfg_write(core_idx, CFG_LINK_CAP_RC, val);
	}
}

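/* Enable or disable the LTR support bit in the RC root capability */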
static void paxb_cfg_LTR(int enable)
{
	uint32_t val, core_idx;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		val = paxb_rc_cfg_read(core_idx, CFG_ROOT_CAP_RC);
		if (enable)
			val |= CFG_ROOT_CAP_LTR_MASK;
		else
			val &= ~CFG_ROOT_CAP_LTR_MASK;
		paxb_rc_cfg_write(core_idx, CFG_ROOT_CAP_RC, val);
	}
}

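/*
 * Invalidate the default inbound window and map the IMAP0 (MSI/MSI-X)
 * windows directly onto the GIC ITS translator region.
 */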
static void paxb_ib_regs_bypass(void)
{
	unsigned int i, j;

	for (i = 0; i < paxb->num_cores; i++) {
		if (!pcie_core_needs_enable(i))
			continue;

		/* Configure Default IMAP window */
		mmio_write_32(PAXB_OFFSET(i) + PAXB_0_DEFAULT_IMAP,
			      DEFAULT_ADDR_INVALID);
		mmio_write_32(PAXB_OFFSET(i) + PAXB_0_DEFAULT_IMAP_AXUSER,
			      IMAP_AXUSER);
		mmio_write_32(PAXB_OFFSET(i) + PAXB_0_DEFAULT_IMAP_AXCACHE,
			      IMAP_AXCACHE);

		/* Configure MSI IMAP window */
		mmio_setbits_32(PAXB_OFFSET(i) +
				PAXB_IMAP0_REGS_TYPE_OFFSET,
				0x1);
		mmio_write_32(PAXB_OFFSET(i) + PAXB_IARR0_BASE_OFFSET,
			      GITS_TRANSLATER | OARR_VALID);
		for (j = 0; j < PAXB_MAX_IMAP_WINDOWS; j++) {
			mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP0_OFFSET(j),
				      (GITS_TRANSLATER +
				       (j * PAXB_IMAP0_WINDOW_SIZE)) |
				      IMAP_VALID);
		}
	}
}

static void paxb_ib_regs_init(void)
{
	unsigned int core_idx;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		/* initialize IARR2 to zero */
		mmio_write_32(PAXB_OFFSET(core_idx) + PAXB_IARR2_LOWER_OFFSET,
			      0x0);
		mmio_setbits_32(PAXB_OFFSET(core_idx) +
				PAXB_IMAP0_REGS_TYPE_OFFSET,
				0x1);
	}
}

static void paxb_cfg_apb_timeout(void)
{
	unsigned int core_idx;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		/* allow unlimited timeout */
		mmio_write_32(PAXB_OFFSET(core_idx) +
			      PAXB_APB_TIMEOUT_COUNT_OFFSET,
			      0xFFFFFFFF);
	}
}

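/*
 * Program the bus/device/function field widths used when forming SMMU
 * stream IDs (SID) for inbound transactions, per the SID_CFG field names.
 */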
static void paxb_smmu_cfg(void)
{
	unsigned int core_idx;
	uint32_t offset;
	uint32_t val;

	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
		if (!pcie_core_needs_enable(core_idx))
			continue;

		offset = core_idx * PCIE_CORE_PWR_OFFSET;
		val = mmio_read_32(PCIE_PAXB_SMMU_SID_CFG + offset);
		val &= ~(0xFFF00);
		val |= (PAXB_SMMU_SID_CFG_FUN_WIDTH |
			PAXB_SMMU_SID_CFG_DEV_WIDTH |
			PAXB_SMMU_SID_CFG_BUS_WIDTH);
		mmio_write_32(PCIE_PAXB_SMMU_SID_CFG + offset, val);
		val = mmio_read_32(PCIE_PAXB_SMMU_SID_CFG + offset);
		VERBOSE("smmu cfg reg 0x%x\n", val);
	}
}

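/*
 * Mark the DDR-bound inbound windows cacheable (write-back, allocate on
 * reads and writes), tag the windows with the non-secure data-access
 * AXUSER attributes, and zero out the IMAP0 MSI/MSI-X mapping windows.
 */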
static void paxb_cfg_coherency(void)
{
	unsigned int i, j;

	for (i = 0; i < paxb->num_cores; i++) {
		if (!pcie_core_needs_enable(i))
			continue;

#ifdef USE_DDR
		mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP2_OFFSET,
			      IMAP_ARCACHE | IMAP_AWCACHE);
#endif

		mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP0_0_AXUSER_OFFSET,
			      IMAP_AXUSER);

		mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP2_AXUSER_OFFSET,
			      IMAP_AXUSER);

		for (j = 0; j < PAXB_MAX_IMAP_WINDOWS; j++) {
#ifdef USE_DDR
			mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP3_OFFSET(j),
				      IMAP_ARCACHE | IMAP_AWCACHE);
			mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP4_OFFSET(j),
				      IMAP_ARCACHE | IMAP_AWCACHE);
#endif
			/* zero out IMAP0 mapping windows for MSI/MSI-X */
			mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP0_OFFSET(j),
				      0x0);

			mmio_write_32(PAXB_OFFSET(i) +
				      PAXB_IMAP3_0_AXUSER_OFFSET(j),
				      IMAP_AXUSER);
			mmio_write_32(PAXB_OFFSET(i) +
				      PAXB_IMAP4_0_AXUSER_OFFSET(j),
				      IMAP_AXUSER);
		}
	}
}

/*
 * This function configures all PAXB related blocks to allow non-secure access
 */
void paxb_ns_init(enum paxb_type type)
{
	unsigned int reg;

	switch (type) {
	case PAXB_SR:
		for (reg = 0; reg < ARRAY_SIZE(paxb_sec_reg_offset); reg++) {
			mmio_setbits_32(SR_PCIE_NIC_SECURITY_BASE +
					paxb_sec_reg_offset[reg], 0x1);
		}
		/* enable non-secure access to all PAXB-related IDM blocks */
		mmio_setbits_32(SR_PCIE_NIC_SECURITY_BASE + PAXB_SECURITY_IDM_OFFSET,
				0xffff);
		break;
	case PAXB_NS3Z:
		mmio_setbits_32(NS3Z_PCIE_NIC_SECURITY_BASE +
				paxb_sec_reg_offset[0], 0x1);
		mmio_setbits_32(NS3Z_PCIE_NIC_SECURITY_BASE +
				PAXB_SECURITY_IDM_OFFSET, 0xffff);
		mmio_setbits_32(NS3Z_PCIE_NIC_SECURITY_BASE +
				PAXB_SECURITY_APB_OFFSET, 0x7);
		mmio_setbits_32(NS3Z_PCIE_NIC_SECURITY_BASE +
				PAXB_SECURITY_ECAM_OFFSET, 0x1);
		break;
	}
}

static int paxb_set_config(void)
{
	paxb = paxb_get_config(sr);
	if (paxb)
		return 0;

	return -ENODEV;
}

void paxb_init(void)
{
	int ret;

	ret = paxb_set_config();
	if (ret)
		return;

	paxb_ns_init(paxb->type);

	ret = pcie_cores_init();
	if (ret)
		return;

	if (paxb->phy_init) {
		ret = paxb->phy_init();
		if (ret)
			return;
	}

	paxb_cfg_dev_id();
	paxb_cfg_tgt_trn();
	paxb_cfg_pdl_ctrl();
	if (paxb->type == PAXB_SR) {
		paxb_ib_regs_init();
		paxb_cfg_coherency();
	} else {
		paxb_ib_regs_bypass();
	}

	paxb_cfg_apb_timeout();
	paxb_smmu_cfg();
	paxb_cfg_clkreq();
	paxb_rc_link_init();

	/* Stingray doesn't support LTR */
	paxb_cfg_LTR(false);
	paxb_cfg_dl_active(true);

	paxb_cfg_mps();

#ifdef PAXB_LINKUP
	paxb_start_link_up();
#endif
	INFO("PAXB init done\n");
}