/*
 * Freescale i.MX6 PCI Express Root-Complex driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 *
 * Based on upstream Linux kernel driver:
 * pci-imx6.c:		Sean Cross <xobs@kosagi.com>
 * pcie-designware.c:	Jingoo Han <jg1.han@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0
 */

#include <common.h>
#include <pci.h>
#include <asm/arch/clock.h>
#include <asm/arch/iomux.h>
#include <asm/arch/crm_regs.h>
#include <asm/io.h>
#include <asm/sizes.h>
#include <errno.h>

#define PCI_ACCESS_READ		0
#define PCI_ACCESS_WRITE	1

#define MX6_DBI_ADDR	0x01ffc000
#define MX6_DBI_SIZE	0x4000
#define MX6_IO_ADDR	0x01000000
#define MX6_IO_SIZE	0x100000
#define MX6_MEM_ADDR	0x01100000
#define MX6_MEM_SIZE	0xe00000
#define MX6_ROOT_ADDR	0x01f00000
#define MX6_ROOT_SIZE	0xfc000

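/*
 * Note: the addresses above carve the i.MX6 16 MB PCIe AXI window
 * (0x0100_0000 - 0x01FF_FFFF) into I/O, memory, configuration and DBI
 * (controller register) sub-windows; see the layout description in
 * imx_pcie_regions_setup() below.
 */
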
/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET			0x700
#define PCIE_PHY_DEBUG_R0		(PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1		(PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_LINK_UP	(1 << 4)
#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING	(1 << 29)

#define PCIE_PHY_CTRL			(PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC		0
#define PCIE_PHY_CTRL_CAP_ADR_LOC	16
#define PCIE_PHY_CTRL_CAP_DAT_LOC	17
#define PCIE_PHY_CTRL_WR_LOC		18
#define PCIE_PHY_CTRL_RD_LOC		19

#define PCIE_PHY_STAT			(PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_DATA_LOC		0
#define PCIE_PHY_STAT_ACK_LOC		16

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT		0x100D

#define PHY_RX_OVRD_IN_LO		0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN	(1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN	(1 << 3)

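/*
 * The PHY registers above are not memory-mapped. They are reached through
 * an indirect interface: address and data are shifted into PCIE_PHY_CTRL
 * together with capture/read/write strobes, and completion is signalled by
 * the ACK bit in PCIE_PHY_STAT; see pcie_phy_read()/pcie_phy_write() below.
 */
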
/* iATU registers */
#define PCIE_ATU_VIEWPORT		0x900
#define PCIE_ATU_REGION_INBOUND		(0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND	(0x0 << 31)
#define PCIE_ATU_REGION_INDEX1		(0x1 << 0)
#define PCIE_ATU_REGION_INDEX0		(0x0 << 0)
#define PCIE_ATU_CR1			0x904
#define PCIE_ATU_TYPE_MEM		(0x0 << 0)
#define PCIE_ATU_TYPE_IO		(0x2 << 0)
#define PCIE_ATU_TYPE_CFG0		(0x4 << 0)
#define PCIE_ATU_TYPE_CFG1		(0x5 << 0)
#define PCIE_ATU_CR2			0x908
#define PCIE_ATU_ENABLE			(0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE	(0x1 << 30)
#define PCIE_ATU_LOWER_BASE		0x90C
#define PCIE_ATU_UPPER_BASE		0x910
#define PCIE_ATU_LIMIT			0x914
#define PCIE_ATU_LOWER_TARGET		0x918
#define PCIE_ATU_BUS(x)			(((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x)			(((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x)		(((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET		0x91C

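/*
 * The iATU is programmed through a viewport: a region index and direction
 * are selected via PCIE_ATU_VIEWPORT, after which the base/limit/target and
 * CR1/CR2 registers of that region can be written. Outbound region #0 is
 * used below to generate configuration cycles on the bus.
 */
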
/*
 * PHY access functions
 */
static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
{
	u32 val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = readl(dbi_base + PCIE_PHY_STAT);
		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
{
	u32 val;
	int ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	writel(val, dbi_base + PCIE_PHY_CTRL);

	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
	writel(val, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	writel(val, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	return 0;
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
{
	u32 val, phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(dbi_base, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	val = readl(dbi_base + PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	writel(0x00, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	return 0;
}

static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
{
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(dbi_base, addr);
	if (ret)
		return ret;

	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	writel(var, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack */
	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	writel(0x0, dbi_base + PCIE_PHY_CTRL);

	return 0;
}

static int imx6_pcie_link_up(void)
{
	u32 rc, ltssm;
	int rx_valid, temp;

	/* link is debug bit 36, debug register 1 starts at bit 32 */
	rc = readl(MX6_DBI_ADDR + PCIE_PHY_DEBUG_R1);
	if ((rc & PCIE_PHY_DEBUG_R1_LINK_UP) &&
	    !(rc & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING))
		return -EAGAIN;
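	/*
	 * Note: the caller, imx_pcie_link_up(), only checks for a nonzero
	 * return value, so the -EAGAIN above effectively reports "link is
	 * up and training has finished".
	 */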

	/*
	 * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
	 * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
	 * If (MAC/LTSSM.state == Recovery.RcvrLock)
	 * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
	 * to gen2 is stuck
	 */
	pcie_phy_read((void *)MX6_DBI_ADDR, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
	ltssm = readl(MX6_DBI_ADDR + PCIE_PHY_DEBUG_R0) & 0x3F;
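	/* LTSSM state 0x0d is the Recovery.RcvrLock state referred to above */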

	if (rx_valid & 0x01)
		return 0;

	if (ltssm != 0x0d)
		return 0;

	printf("transition to gen2 is stuck, reset PHY!\n");

	pcie_phy_read((void *)MX6_DBI_ADDR, PHY_RX_OVRD_IN_LO, &temp);
	temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write((void *)MX6_DBI_ADDR, PHY_RX_OVRD_IN_LO, temp);

	udelay(3000);

	pcie_phy_read((void *)MX6_DBI_ADDR, PHY_RX_OVRD_IN_LO, &temp);
	temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write((void *)MX6_DBI_ADDR, PHY_RX_OVRD_IN_LO, temp);

	return 0;
}

/*
 * iATU region setup
 */
static int imx_pcie_regions_setup(void)
{
	/*
	 * i.MX6 defines 16MB in the AXI address map for PCIe.
	 *
	 * That address space, except for the PCIe registers themselves, is
	 * split into separate regions by the iATU, with the following sizes
	 * and offsets:
	 *
	 * 0x0100_0000 --- 0x010F_FFFF  1MB  IORESOURCE_IO
	 * 0x0110_0000 --- 0x01EF_FFFF 14MB  IORESOURCE_MEM
	 * 0x01F0_0000 --- 0x01FF_FFFF  1MB  Cfg + Registers
	 */

	/* CMD reg: I/O space, MEM space, and Bus Master Enable */
	setbits_le32(MX6_DBI_ADDR | PCI_COMMAND,
		     PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

	/* Set the CLASS_REV of RC CFG header to PCI_CLASS_BRIDGE_PCI */
	setbits_le32(MX6_DBI_ADDR + PCI_CLASS_REVISION,
		     PCI_CLASS_BRIDGE_PCI << 16);

	/* Region #0 is used for Outbound CFG space access. */
	writel(0, MX6_DBI_ADDR + PCIE_ATU_VIEWPORT);

	writel(MX6_ROOT_ADDR, MX6_DBI_ADDR + PCIE_ATU_LOWER_BASE);
	writel(0, MX6_DBI_ADDR + PCIE_ATU_UPPER_BASE);
	writel(MX6_ROOT_ADDR + MX6_ROOT_SIZE, MX6_DBI_ADDR + PCIE_ATU_LIMIT);

	writel(0, MX6_DBI_ADDR + PCIE_ATU_LOWER_TARGET);
	writel(0, MX6_DBI_ADDR + PCIE_ATU_UPPER_TARGET);
	writel(PCIE_ATU_TYPE_CFG0, MX6_DBI_ADDR + PCIE_ATU_CR1);
	writel(PCIE_ATU_ENABLE, MX6_DBI_ADDR + PCIE_ATU_CR2);

	return 0;
}

/*
 * PCI Express accessors
 */
static uint32_t get_bus_address(pci_dev_t d, int where)
{
	uint32_t va_address;

	/* Reconfigure Region #0 */
	writel(0, MX6_DBI_ADDR + PCIE_ATU_VIEWPORT);

	if (PCI_BUS(d) < 2)
		writel(PCIE_ATU_TYPE_CFG0, MX6_DBI_ADDR + PCIE_ATU_CR1);
	else
		writel(PCIE_ATU_TYPE_CFG1, MX6_DBI_ADDR + PCIE_ATU_CR1);

	if (PCI_BUS(d) == 0) {
		va_address = MX6_DBI_ADDR;
	} else {
		writel(d << 8, MX6_DBI_ADDR + PCIE_ATU_LOWER_TARGET);
		va_address = MX6_IO_ADDR + SZ_16M - SZ_1M;
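		/* i.e. MX6_ROOT_ADDR, the 1 MB window remapped by iATU region #0 */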
	}

	va_address += (where & ~0x3);

	return va_address;
}

static int imx_pcie_addr_valid(pci_dev_t d)
{
	if ((PCI_BUS(d) == 0) && (PCI_DEV(d) > 1))
		return -EINVAL;
	if ((PCI_BUS(d) == 1) && (PCI_DEV(d) > 0))
		return -EINVAL;
	return 0;
}

/*
 * Replace the original ARM DABT handler with a simple jump-back one.
 *
 * The problem here is that if we have a PCIe bridge attached to this PCIe
 * controller, but no PCIe device is connected to the bridge's downstream
 * port, an attempt to read from or write to its config space will produce
 * a DABT. This is a behavior of the controller and unfortunately cannot
 * be disabled.
 *
 * To work around the problem, we back up the current DABT handler address
 * and replace it with our own DABT handler, which only bounces right back
 * into the code.
 */
static void imx_pcie_fix_dabt_handler(bool set)
{
	extern uint32_t *_data_abort;
	uint32_t *data_abort_addr = (uint32_t *)&_data_abort;

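	/*
	 * 0xe25ef004 encodes the ARM instruction "subs pc, lr, #4", i.e. an
	 * immediate return from the data abort exception.
	 */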
	static const uint32_t data_abort_bounce_handler = 0xe25ef004;
	uint32_t data_abort_bounce_addr = (uint32_t)&data_abort_bounce_handler;

	static uint32_t data_abort_backup;

	if (set) {
		data_abort_backup = *data_abort_addr;
		*data_abort_addr = data_abort_bounce_addr;
	} else {
		*data_abort_addr = data_abort_backup;
	}
}

static int imx_pcie_read_config(struct pci_controller *hose, pci_dev_t d,
				int where, u32 *val)
{
	uint32_t va_address;
	int ret;

	ret = imx_pcie_addr_valid(d);
	if (ret) {
		*val = 0xffffffff;
		return ret;
	}

	va_address = get_bus_address(d, where);

	/*
	 * Read the PCIe config space. We must replace the DABT handler
	 * here in case we get a data abort from the PCIe controller; see
	 * the imx_pcie_fix_dabt_handler() description. Note that pre-filling
	 * "val" with a valid value is also imperative here: if we do get a
	 * DABT, "val" would otherwise contain a random value.
	 */
	imx_pcie_fix_dabt_handler(true);
	writel(0xffffffff, val);
	*val = readl(va_address);
	imx_pcie_fix_dabt_handler(false);

	return 0;
}

static int imx_pcie_write_config(struct pci_controller *hose, pci_dev_t d,
				 int where, u32 val)
{
	uint32_t va_address = 0;
	int ret;

	ret = imx_pcie_addr_valid(d);
	if (ret)
		return ret;

	va_address = get_bus_address(d, where);

	/*
	 * Write the PCIe config space. We must replace the DABT handler
	 * here in case we get a data abort from the PCIe controller; see
	 * the imx_pcie_fix_dabt_handler() description.
	 */
	imx_pcie_fix_dabt_handler(true);
	writel(val, va_address);
	imx_pcie_fix_dabt_handler(false);

	return 0;
}

/*
 * Initial bus setup
 */
static int imx6_pcie_assert_core_reset(void)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;

	setbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_TEST_POWERDOWN);
	clrbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_REF_SSP_EN);

	return 0;
}

static int imx6_pcie_init_phy(void)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;

	clrbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_APPS_LTSSM_ENABLE);

	clrsetbits_le32(&iomuxc_regs->gpr[12],
			IOMUXC_GPR12_DEVICE_TYPE_MASK,
			IOMUXC_GPR12_DEVICE_TYPE_RC);
	clrsetbits_le32(&iomuxc_regs->gpr[12],
			IOMUXC_GPR12_LOS_LEVEL_MASK,
			IOMUXC_GPR12_LOS_LEVEL_9);

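	/* PHY transmitter de-emphasis and signal swing settings (IOMUXC GPR8) */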
	writel((0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN1_OFFSET) |
	       (0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_3P5DB_OFFSET) |
	       (20 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_6DB_OFFSET) |
	       (127 << IOMUXC_GPR8_PCS_TX_SWING_FULL_OFFSET) |
	       (127 << IOMUXC_GPR8_PCS_TX_SWING_LOW_OFFSET),
	       &iomuxc_regs->gpr[8]);

	return 0;
}

static int imx6_pcie_deassert_core_reset(void)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;

	/* FIXME: Power-up GPIO goes here. */

	/* Enable PCIe */
	clrbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_TEST_POWERDOWN);
	setbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_REF_SSP_EN);

	enable_pcie_clock();

	/*
	 * Wait for the clock to settle a bit; when the clocks are sourced
	 * from the CPU, they need about 30 ms to settle.
	 */
	mdelay(30);

	/* FIXME: GPIO reset goes here */
	mdelay(100);

	return 0;
}

static int imx_pcie_link_up(void)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;
	uint32_t tmp;
	int count = 0;

	imx6_pcie_assert_core_reset();
	imx6_pcie_init_phy();
	imx6_pcie_deassert_core_reset();

	imx_pcie_regions_setup();

	/*
	 * FIXME: Force the PCIe RC to Gen1 operation
	 * The RC must be forced into Gen1 mode before bringing the link
	 * up, otherwise no downstream devices are detected. After the
	 * link is up, a managed Gen1->Gen2 transition can be initiated.
	 */
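	/*
	 * Offset 0x7c in the DBI space is the Link Capabilities register;
	 * its low nibble holds the Max Link Speed field, forced here to
	 * 0x1 (2.5 GT/s, Gen1).
	 */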
	tmp = readl(MX6_DBI_ADDR + 0x7c);
	tmp &= ~0xf;
	tmp |= 0x1;
	writel(tmp, MX6_DBI_ADDR + 0x7c);

	/* LTSSM enable, starting link. */
	setbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_APPS_LTSSM_ENABLE);

	while (!imx6_pcie_link_up()) {
		udelay(10);
		count++;
		if (count >= 2000) {
			debug("phy link never came up\n");
			debug("DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
			      readl(MX6_DBI_ADDR + PCIE_PHY_DEBUG_R0),
			      readl(MX6_DBI_ADDR + PCIE_PHY_DEBUG_R1));
			return -EINVAL;
		}
	}

	return 0;
}

void imx_pcie_init(void)
{
	/* Static instance of the controller. */
	static struct pci_controller pcc;
	struct pci_controller *hose = &pcc;
	int ret;

	memset(&pcc, 0, sizeof(pcc));

	/* PCI I/O space */
	pci_set_region(&hose->regions[0],
		       MX6_IO_ADDR, MX6_IO_ADDR,
		       MX6_IO_SIZE, PCI_REGION_IO);

	/* PCI memory space */
	pci_set_region(&hose->regions[1],
		       MX6_MEM_ADDR, MX6_MEM_ADDR,
		       MX6_MEM_SIZE, PCI_REGION_MEM);

	/* System memory space */
	pci_set_region(&hose->regions[2],
		       MMDC0_ARB_BASE_ADDR, MMDC0_ARB_BASE_ADDR,
		       0xefffffff, PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);

	hose->region_count = 3;

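	/*
	 * Only the native 32-bit config-space accessors are implemented;
	 * byte and word accesses go through the generic via-dword helpers.
	 */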
	pci_set_ops(hose,
		    pci_hose_read_config_byte_via_dword,
		    pci_hose_read_config_word_via_dword,
		    imx_pcie_read_config,
		    pci_hose_write_config_byte_via_dword,
		    pci_hose_write_config_word_via_dword,
		    imx_pcie_write_config);

	/* Start the controller. */
	ret = imx_pcie_link_up();

	if (!ret) {
		pci_register_hose(hose);
		hose->last_busno = pci_hose_scan(hose);
	}
}

/* Probe function. */
void pci_init_board(void)
{
	imx_pcie_init();
}