// SPDX-License-Identifier: GPL-2.0
2/*
3 * MediaTek PCIe host controller driver.
4 *
5 * Copyright (c) 2020 MediaTek Inc.
6 * Author: Jianjun Wang <jianjun.wang@mediatek.com>
7 */
8
9#include <linux/clk.h>
10#include <linux/delay.h>
11#include <linux/iopoll.h>
12#include <linux/irq.h>
13#include <linux/irqchip/chained_irq.h>
14#include <linux/irqdomain.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/msi.h>
18#include <linux/of_address.h>
19#include <linux/of_clk.h>
20#include <linux/of_pci.h>
21#include <linux/of_platform.h>
22#include <linux/pci.h>
23#include <linux/phy/phy.h>
24#include <linux/platform_device.h>
25#include <linux/pm_domain.h>
26#include <linux/pm_runtime.h>
27#include <linux/reset.h>
28
29#include "../pci.h"
30
/* Host controller mode / IDs configuration */
#define PCIE_SETTING_REG		0x80
#define PCIE_PCI_IDS_1			0x9c
#define PCI_CLASS(class)		(class << 8)
#define PCIE_RC_MODE			BIT(0)

/* Configuration transaction (TLP header) generation */
#define PCIE_CFGNUM_REG			0x140
#define PCIE_CFG_DEVFN(devfn)		((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus)		(((bus) << 8) & GENMASK(15, 8))
#define PCIE_CFG_BYTE_EN(bytes)		(((bytes) << 16) & GENMASK(19, 16))
#define PCIE_CFG_FORCE_BYTE_EN		BIT(20)
#define PCIE_CFG_OFFSET_ADDR		0x1000
#define PCIE_CFG_HEADER(bus, devfn) \
	(PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))

/* Reset control: MAC, PHY, bridge and downstream PERST# */
#define PCIE_RST_CTRL_REG		0x148
#define PCIE_MAC_RSTB			BIT(0)
#define PCIE_PHY_RSTB			BIT(1)
#define PCIE_BRG_RSTB			BIT(2)
#define PCIE_PE_RSTB			BIT(3)

/* LTSSM state is reported in bits 28:24 of this register */
#define PCIE_LTSSM_STATUS_REG		0x150
#define PCIE_LTSSM_STATE_MASK		GENMASK(28, 24)
#define PCIE_LTSSM_STATE(val)		((val & PCIE_LTSSM_STATE_MASK) >> 24)
#define PCIE_LTSSM_STATE_L2_IDLE	0x14

#define PCIE_LINK_STATUS_REG		0x154
#define PCIE_PORT_LINKUP		BIT(8)

/* 8 MSI sets, 32 vectors per set, 256 MSI vectors total */
#define PCIE_MSI_SET_NUM		8
#define PCIE_MSI_IRQS_PER_SET		32
#define PCIE_MSI_IRQS_NUM \
	(PCIE_MSI_IRQS_PER_SET * (PCIE_MSI_SET_NUM))

/* Top-level interrupt enable: MSI sets at bits 15:8, INTx at bits 27:24 */
#define PCIE_INT_ENABLE_REG		0x180
#define PCIE_MSI_MASK			GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
#define PCIE_MSI_SHIFT			8
#define PCIE_INTX_SHIFT			24
#define PCIE_INTX_MASK			GENMASK(27, 24)

#define PCIE_INT_STATUS_REG		0x184
#define PCIE_MSI_SET_ENABLE_REG		0x190

#define PCIE_ICMD_PM_REG		0x198
#define PCIE_TURN_OFF_LINK		BIT(4)

/* Per-MSI-set register window: address, status, enable */
#define PCIE_MSI_ADDR_BASE_REG		0xc00
#define PCIE_MSI_SET_OFFSET		0x10
#define PCIE_MSI_STATUS_OFFSET		0x04
#define PCIE_MSI_ENABLE_OFFSET		0x08

/* Address translation (ATR) table: one 0x20-byte entry per window */
#define PCIE_TRANS_TABLE_BASE_REG	0x800
#define PCIE_ATR_SRC_ADDR_MSB_OFFSET	0x4
#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET	0x8
#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET	0xc
#define PCIE_ATR_TRSL_PARAM_OFFSET	0x10
#define PCIE_ATR_TLB_SET_OFFSET		0x20

#define PCIE_MAX_TRANS_TABLES		8
#define PCIE_ATR_EN			BIT(0)
/* size is log2 of the window size; encoded as (log2 - 1) in bits 6:1 */
#define PCIE_ATR_SIZE(size) \
	(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
#define PCIE_ATR_ID(id)			((id) & GENMASK(3, 0))
#define PCIE_ATR_TYPE_MEM		PCIE_ATR_ID(0)
#define PCIE_ATR_TYPE_IO		PCIE_ATR_ID(1)
#define PCIE_ATR_TLP_TYPE(type)		(((type) << 16) & GENMASK(18, 16))
#define PCIE_ATR_TLP_TYPE_MEM		PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO		PCIE_ATR_TLP_TYPE(2)
/**
 * struct mtk_pcie_msi - MSI information for each set
 * @base: IO mapped register base
 * @irq: MSI set Interrupt number
 * @index: MSI set number
 * @msg_addr: MSI message address
 * @domain: IRQ domain
 */
struct mtk_pcie_msi {
	void __iomem *base;
	unsigned int irq;
	int index;
	phys_addr_t msg_addr;
	struct irq_domain *domain;
};

/**
 * struct mtk_pcie_port - PCIe port information
 * @dev: PCIe device
 * @base: IO mapped register base
 * @reg_base: Physical register base
 * @mac_reset: mac reset control
 * @phy_reset: phy reset control
 * @phy: PHY controller block
 * @clks: PCIe clocks
 * @num_clks: PCIe clocks count for this port
 * @busnr: root bus number of this port
 * @irq: PCIe controller interrupt number
 * @intx_domain: legacy INTx IRQ domain
 * @msi_domain: MSI IRQ domain
 * @msi_top_domain: MSI IRQ top domain
 * @msi_info: MSI sets information
 * @lock: lock protecting IRQ bit map
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_pcie_port {
	struct device *dev;
	void __iomem *base;
	phys_addr_t reg_base;
	struct reset_control *mac_reset;
	struct reset_control *phy_reset;
	struct phy *phy;
	struct clk_bulk_data *clks;
	int num_clks;
	unsigned int busnr;

	int irq;
	struct irq_domain *intx_domain;
	struct irq_domain *msi_domain;
	struct irq_domain *msi_top_domain;
	struct mtk_pcie_msi **msi_info;
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
};
152
153/**
154 * mtk_pcie_config_tlp_header
155 * @bus: PCI bus to query
156 * @devfn: device/function number
157 * @where: offset in config space
158 * @size: data size in TLP header
159 *
160 * Set byte enable field and device information in configuration TLP header.
161 */
162static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
163 int where, int size)
164{
165 struct mtk_pcie_port *port = bus->sysdata;
166 int bytes;
167 u32 val;
168
169 bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);
170
171 val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
172 PCIE_CFG_HEADER(bus->number, devfn);
173
174 writel(val, port->base + PCIE_CFGNUM_REG);
175}
176
177static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
178 int where)
179{
180 struct mtk_pcie_port *port = bus->sysdata;
181
182 return port->base + PCIE_CFG_OFFSET_ADDR + where;
183}
184
185static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
186 int where, int size, u32 *val)
187{
188 mtk_pcie_config_tlp_header(bus, devfn, where, size);
189
190 return pci_generic_config_read32(bus, devfn, where, size, val);
191}
192
193static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
194 int where, int size, u32 val)
195{
196 mtk_pcie_config_tlp_header(bus, devfn, where, size);
197
198 if (size <= 2)
199 val <<= (where & 0x3) * 8;
200
201 return pci_generic_config_write32(bus, devfn, where, 4, val);
202}
203
/* Config-space accessors handed to the PCI core via the host bridge. */
static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};
209
210static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port,
211 resource_size_t cpu_addr,
212 resource_size_t pci_addr,
213 resource_size_t size,
214 unsigned long type, int num)
215{
216 void __iomem *table;
217 u32 val = 0;
218
219 if (num >= PCIE_MAX_TRANS_TABLES) {
220 dev_notice(port->dev, "not enough translate table[%d] for addr: %#llx, limited to [%d]\n",
221 num, (unsigned long long) cpu_addr,
222 PCIE_MAX_TRANS_TABLES);
223 return -ENODEV;
224 }
225
226 table = port->base + PCIE_TRANS_TABLE_BASE_REG +
227 num * PCIE_ATR_TLB_SET_OFFSET;
228
229 writel(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1), table);
230 writel(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
231 writel(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
232 writel(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
233
234 if (type == IORESOURCE_IO)
235 val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
236 else
237 val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
238
239 writel(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
240
241 return 0;
242}
243
/*
 * Bring the port out of reset, wait for link-up and program the outbound
 * translation windows from the host bridge resource list.
 *
 * Returns 0 on success, the readl_poll_timeout() error when the link does
 * not come up, or the mtk_pcie_set_trans_table() error.
 */
static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
	struct resource_entry *entry;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
	unsigned int table_index = 0;
	int err;
	u32 val;

	/* Set as RC mode */
	val = readl(port->base + PCIE_SETTING_REG);
	val |= PCIE_RC_MODE;
	writel(val, port->base + PCIE_SETTING_REG);

	/* Set class code (bits 31:8; bits 7:0 keep the revision ID) */
	val = readl(port->base + PCIE_PCI_IDS_1);
	val &= ~GENMASK(31, 8);
	val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8);
	writel(val, port->base + PCIE_PCI_IDS_1);

	/* Assert all reset signals */
	val = readl(port->base + PCIE_RST_CTRL_REG);
	val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
	writel(val, port->base + PCIE_RST_CTRL_REG);

	/* De-assert reset signals (PERST# stays asserted for now) */
	val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB);
	writel(val, port->base + PCIE_RST_CTRL_REG);

	/* Delay 100ms to wait the reference clocks become stable */
	usleep_range(100 * 1000, 120 * 1000);

	/* De-assert PERST# signal */
	val &= ~PCIE_PE_RSTB;
	writel(val, port->base + PCIE_RST_CTRL_REG);

	/* Check if the link is up or not (poll up to 50ms) */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_REG, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 50 * USEC_PER_MSEC);
	if (err) {
		val = readl(port->base + PCIE_LTSSM_STATUS_REG);
		dev_notice(port->dev, "PCIe link down, ltssm reg val: %#x\n",
			   val);
		return err;
	}

	/* Set PCIe translation windows */
	resource_list_for_each_entry(entry, &host->windows) {
		struct resource *res = entry->res;
		unsigned long type = resource_type(res);
		resource_size_t cpu_addr;
		resource_size_t pci_addr;
		resource_size_t size;
		const char *range_type;

		if (type == IORESOURCE_IO) {
			cpu_addr = pci_pio_to_address(res->start);
			range_type = "IO";
		} else if (type == IORESOURCE_MEM) {
			cpu_addr = res->start;
			range_type = "MEM";
		} else {
			/* Bus-number and other windows need no translation */
			continue;
		}

		pci_addr = res->start - entry->offset;
		size = resource_size(res);
		err = mtk_pcie_set_trans_table(port, cpu_addr, pci_addr, size,
					       type, table_index);
		if (err)
			return err;

		dev_dbg(port->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
			range_type, table_index, (unsigned long long) cpu_addr,
			(unsigned long long) pci_addr,
			(unsigned long long) size);

		table_index++;
	}

	return 0;
}
326
327static inline struct mtk_pcie_msi *mtk_get_msi_info(struct mtk_pcie_port *port,
328 unsigned long hwirq)
329{
330 return port->msi_info[hwirq / PCIE_MSI_IRQS_PER_SET];
331}
332
333static int mtk_pcie_set_affinity(struct irq_data *data,
334 const struct cpumask *mask, bool force)
335{
336 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
337 struct irq_data *port_data = irq_get_irq_data(port->irq);
338 struct irq_chip *port_chip = irq_data_get_irq_chip(port_data);
339 int ret;
340
341 if (!port_chip || !port_chip->irq_set_affinity)
342 return -EINVAL;
343
344 ret = port_chip->irq_set_affinity(port_data, mask, force);
345
346 irq_data_update_effective_affinity(data, mask);
347
348 return ret;
349}
350
351static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
352{
353 struct mtk_pcie_msi *msi_info;
354 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
355 unsigned long hwirq;
356
357 msi_info = mtk_get_msi_info(port, data->hwirq);
358 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
359
360 msg->address_hi = 0;
361 msg->address_lo = lower_32_bits(msi_info->msg_addr);
362 msg->data = hwirq;
363 dev_dbg(port->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
364 hwirq, msg->address_hi, msg->address_lo, msg->data);
365}
366
367static void mtk_msi_irq_ack(struct irq_data *data)
368{
369 struct mtk_pcie_msi *msi_info;
370 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
371 unsigned long hwirq;
372
373 msi_info = mtk_get_msi_info(port, data->hwirq);
374 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
375
376 writel(BIT(hwirq), msi_info->base + PCIE_MSI_STATUS_OFFSET);
377}
378
379static void mtk_msi_irq_mask(struct irq_data *data)
380{
381 struct mtk_pcie_msi *msi_info;
382 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
383 unsigned long hwirq;
384 u32 val;
385
386 msi_info = mtk_get_msi_info(port, data->hwirq);
387 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
388
389 val = readl(msi_info->base + PCIE_MSI_ENABLE_OFFSET);
390 val &= ~BIT(hwirq);
391 writel(val, msi_info->base + PCIE_MSI_ENABLE_OFFSET);
392
393 pci_msi_mask_irq(data);
394}
395
396static void mtk_msi_irq_unmask(struct irq_data *data)
397{
398 struct mtk_pcie_msi *msi_info;
399 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
400 unsigned long hwirq;
401 u32 val;
402
403 msi_info = mtk_get_msi_info(port, data->hwirq);
404 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
405
406 val = readl(msi_info->base + PCIE_MSI_ENABLE_OFFSET);
407 val |= BIT(hwirq);
408 writel(val, msi_info->base + PCIE_MSI_ENABLE_OFFSET);
409
410 pci_msi_unmask_irq(data);
411}
412
/* irq_chip for the per-vector MSI interrupts (edge handled, ack clears). */
static struct irq_chip mtk_msi_irq_chip = {
	.irq_ack = mtk_msi_irq_ack,
	.irq_compose_msi_msg = mtk_compose_msi_msg,
	.irq_mask = mtk_msi_irq_mask,
	.irq_unmask = mtk_msi_irq_unmask,
	.irq_set_affinity = mtk_pcie_set_affinity,
	.name = "PCIe",
};
421
422static irq_hw_number_t mtk_pcie_msi_get_hwirq(struct msi_domain_info *info,
423 msi_alloc_info_t *arg)
424{
425 struct msi_desc *entry = arg->desc;
426 struct mtk_pcie_port *port = info->chip_data;
427 int hwirq;
428
429 mutex_lock(&port->lock);
430
431 hwirq = bitmap_find_free_region(port->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
432 order_base_2(entry->nvec_used));
433 if (hwirq < 0) {
434 mutex_unlock(&port->lock);
435 return -ENOSPC;
436 }
437
438 mutex_unlock(&port->lock);
439
440 return hwirq;
441}
442
443static void mtk_pcie_msi_free(struct irq_domain *domain,
444 struct msi_domain_info *info, unsigned int virq)
445{
446 struct irq_data *data = irq_domain_get_irq_data(domain, virq);
447 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
448
449 mutex_lock(&port->lock);
450
451 bitmap_clear(port->msi_irq_in_use, data->hwirq, 1);
452
453 mutex_unlock(&port->lock);
454}
455
/* Custom hwirq allocation/free hooks for the PCI MSI domain. */
static struct msi_domain_ops mtk_msi_domain_ops = {
	.get_hwirq = mtk_pcie_msi_get_hwirq,
	.msi_free = mtk_pcie_msi_free,
};

/* Template for the per-port MSI domain; chip_data is filled in at init. */
static struct msi_domain_info mtk_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_PCI_MSIX |
		  MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI),
	.chip = &mtk_msi_irq_chip,
	.ops = &mtk_msi_domain_ops,
	.handler = handle_edge_irq,
	.handler_name = "MSI",
};
469
470static void mtk_msi_top_irq_eoi(struct irq_data *data)
471{
472 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
473 unsigned long msi_irq = data->hwirq + PCIE_MSI_SHIFT;
474
475 writel(BIT(msi_irq), port->base + PCIE_INT_STATUS_REG);
476}
477
/* irq_chip for the 8 per-set aggregation interrupts (fasteoi flow). */
static struct irq_chip mtk_msi_top_irq_chip = {
	.irq_eoi = mtk_msi_top_irq_eoi,
	.name = "PCIe",
};
482
/*
 * Chained handler for one MSI set: demultiplex every pending, enabled
 * vector in the set and dispatch it to the MSI domain mapping.
 * Status is re-read until it reads back zero so vectors raised while
 * handling are not lost.
 */
static void mtk_pcie_msi_handler(struct irq_desc *desc)
{
	struct mtk_pcie_msi *msi_info = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long msi_enable, msi_status;
	unsigned int virq;
	irq_hw_number_t bit, hwirq;

	chained_irq_enter(irqchip, desc);

	msi_enable = readl(msi_info->base + PCIE_MSI_ENABLE_OFFSET);
	while ((msi_status = readl(msi_info->base + PCIE_MSI_STATUS_OFFSET))) {
		/* Ignore vectors that are pending but masked */
		msi_status &= msi_enable;
		for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
			/* Translate set-local bit to the global hwirq */
			hwirq = bit + msi_info->index * PCIE_MSI_IRQS_PER_SET;
			virq = irq_find_mapping(msi_info->domain, hwirq);
			generic_handle_irq(virq);
		}
	}

	chained_irq_exit(irqchip, desc);
}
505
506static int mtk_msi_top_domain_map(struct irq_domain *domain,
507 unsigned int virq, irq_hw_number_t hwirq)
508{
509 struct mtk_pcie_port *port = domain->host_data;
510 struct mtk_pcie_msi *msi_info = port->msi_info[hwirq];
511
512 irq_domain_set_info(domain, virq, hwirq,
513 &mtk_msi_top_irq_chip, domain->host_data,
514 mtk_pcie_msi_handler, msi_info, NULL);
515
516 return 0;
517}
518
519static const struct irq_domain_ops mtk_msi_top_domain_ops = {
520 .map = mtk_msi_top_domain_map,
521};
522
523static void mtk_intx_mask(struct irq_data *data)
524{
525 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
526 u32 val;
527
528 val = readl(port->base + PCIE_INT_ENABLE_REG);
529 val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
530 writel(val, port->base + PCIE_INT_ENABLE_REG);
531}
532
533static void mtk_intx_unmask(struct irq_data *data)
534{
535 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
536 u32 val;
537
538 val = readl(port->base + PCIE_INT_ENABLE_REG);
539 val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
540 writel(val, port->base + PCIE_INT_ENABLE_REG);
541}
542
543static void mtk_intx_eoi(struct irq_data *data)
544{
545 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
546 unsigned long hwirq;
547
548 /**
549 * As an emulated level IRQ, its interrupt status will remain
550 * until the corresponding de-assert message is received; hence that
551 * the status can only be cleared when the interrupt has been serviced.
552 */
553 hwirq = data->hwirq + PCIE_INTX_SHIFT;
554 writel(BIT(hwirq), port->base + PCIE_INT_STATUS_REG);
555}
556
/* irq_chip for the four legacy INTx interrupts (fasteoi flow). */
static struct irq_chip mtk_intx_irq_chip = {
	.irq_mask = mtk_intx_mask,
	.irq_unmask = mtk_intx_unmask,
	.irq_eoi = mtk_intx_eoi,
	.irq_set_affinity = mtk_pcie_set_affinity,
	.name = "PCIe",
};
564
565static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
566 irq_hw_number_t hwirq)
567{
568 irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
569 handle_fasteoi_irq, "INTx");
570 irq_set_chip_data(irq, domain->host_data);
571
572 return 0;
573}
574
575static const struct irq_domain_ops intx_domain_ops = {
576 .map = mtk_pcie_intx_map,
577};
578
579static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port,
580 struct device_node *node)
581{
582 struct device *dev = port->dev;
583 struct device_node *intc_node;
584 struct fwnode_handle *fwnode = of_node_to_fwnode(node);
585 struct mtk_pcie_msi *msi_info;
586 struct msi_domain_info *info;
587 int i, ret;
588
589 /* Setup INTx */
590 intc_node = of_get_child_by_name(node, "interrupt-controller");
591 if (!intc_node) {
592 dev_notice(dev, "missing PCIe Intc node\n");
593 return -ENODEV;
594 }
595
596 port->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
597 &intx_domain_ops, port);
598 if (!port->intx_domain) {
599 dev_notice(dev, "failed to get INTx IRQ domain\n");
600 return -ENODEV;
601 }
602
603 /* Setup MSI */
604 mutex_init(&port->lock);
605
606 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
607 if (!info)
608 return -ENOMEM;
609
610 memcpy(info, &mtk_msi_domain_info, sizeof(*info));
611 info->chip_data = port;
612
613 port->msi_domain = pci_msi_create_irq_domain(fwnode, info, NULL);
614 if (!port->msi_domain) {
615 dev_info(dev, "failed to create MSI domain\n");
616 ret = -ENODEV;
617 goto err_msi_domain;
618 }
619
620 /* Enable MSI and setup PCIe domains */
621 port->msi_top_domain = irq_domain_add_hierarchy(NULL, 0, 0, node,
622 &mtk_msi_top_domain_ops,
623 port);
624 if (!port->msi_top_domain) {
625 dev_info(dev, "failed to create MSI top domain\n");
626 ret = -ENODEV;
627 goto err_msi_top_domain;
628 }
629
630 port->msi_info = devm_kzalloc(dev, PCIE_MSI_SET_NUM, GFP_KERNEL);
631 if (!port->msi_info) {
632 ret = -ENOMEM;
633 goto err_msi_info;
634 }
635
636 for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
637 int offset = i * PCIE_MSI_SET_OFFSET;
638 u32 val;
639
640 msi_info = devm_kzalloc(dev, sizeof(*msi_info), GFP_KERNEL);
641 if (!msi_info) {
642 ret = -ENOMEM;
643 goto err_msi_set;
644 }
645
646 msi_info->base = port->base + PCIE_MSI_ADDR_BASE_REG + offset;
647 msi_info->msg_addr = port->reg_base + PCIE_MSI_ADDR_BASE_REG +
648 offset;
649
650 writel(lower_32_bits(msi_info->msg_addr), msi_info->base);
651
652 msi_info->index = i;
653 msi_info->domain = port->msi_domain;
654
655 port->msi_info[i] = msi_info;
656
657 /* Alloc IRQ for each MSI set */
658 msi_info->irq = irq_create_mapping(port->msi_top_domain, i);
659 if (!msi_info->irq) {
660 dev_info(dev, "allocate MSI top IRQ failed\n");
661 ret = -ENOSPC;
662 goto err_msi_set;
663 }
664
665 val = readl(port->base + PCIE_INT_ENABLE_REG);
666 val |= BIT(i + PCIE_MSI_SHIFT);
667 writel(val, port->base + PCIE_INT_ENABLE_REG);
668
669 val = readl(port->base + PCIE_MSI_SET_ENABLE_REG);
670 val |= BIT(i);
671 writel(val, port->base + PCIE_MSI_SET_ENABLE_REG);
672 }
673
674 return 0;
675
676err_msi_set:
677 while (i-- > 0) {
678 msi_info = port->msi_info[i];
679 irq_dispose_mapping(msi_info->irq);
680 }
681err_msi_info:
682 irq_domain_remove(port->msi_top_domain);
683err_msi_top_domain:
684 irq_domain_remove(port->msi_domain);
685err_msi_domain:
686 irq_domain_remove(port->intx_domain);
687
688 return ret;
689}
690
691static void mtk_pcie_irq_teardown(struct mtk_pcie_port *port)
692{
693 struct mtk_pcie_msi *msi_info;
694 int i;
695
696 irq_set_chained_handler_and_data(port->irq, NULL, NULL);
697
698 if (port->intx_domain)
699 irq_domain_remove(port->intx_domain);
700
701 if (port->msi_domain)
702 irq_domain_remove(port->msi_domain);
703
704 if (port->msi_top_domain) {
705 for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
706 msi_info = port->msi_info[i];
707 irq_dispose_mapping(msi_info->irq);
708 }
709
710 irq_domain_remove(port->msi_top_domain);
711 }
712
713 irq_dispose_mapping(port->irq);
714}
715
/*
 * Top-level chained handler: demultiplex the controller interrupt into
 * INTx lines (status bits 27:24) and MSI set interrupts (status bits 15:8).
 */
static void mtk_pcie_irq_handler(struct irq_desc *desc)
{
	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	unsigned int virq;
	irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;

	chained_irq_enter(irqchip, desc);

	status = readl(port->base + PCIE_INT_STATUS_REG);
	if (status & PCIE_INTX_MASK) {
		/* Scan only the four INTx bits starting at PCIE_INTX_SHIFT */
		for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
				      PCIE_INTX_SHIFT) {
			virq = irq_find_mapping(port->intx_domain,
						irq_bit - PCIE_INTX_SHIFT);
			generic_handle_irq(virq);
		}
	}

	if (status & PCIE_MSI_MASK) {
		irq_bit = PCIE_MSI_SHIFT;
		/* Each set bit fans out to mtk_pcie_msi_handler() */
		for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
				      PCIE_MSI_SHIFT) {
			virq = irq_find_mapping(port->msi_top_domain,
						irq_bit - PCIE_MSI_SHIFT);
			generic_handle_irq(virq);
		}
	}

	chained_irq_exit(irqchip, desc);
}
748
749static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
750 struct device_node *node)
751{
752 struct device *dev = port->dev;
753 struct platform_device *pdev = to_platform_device(dev);
754 int err;
755
756 err = mtk_pcie_init_irq_domains(port, node);
757 if (err) {
758 dev_notice(dev, "failed to init PCIe IRQ domain\n");
759 return err;
760 }
761
762 port->irq = platform_get_irq(pdev, 0);
763 if (port->irq < 0)
764 return port->irq;
765
766 irq_set_chained_handler_and_data(port->irq, mtk_pcie_irq_handler, port);
767
768 return 0;
769}
770
771static int mtk_pcie_clk_init(struct mtk_pcie_port *port)
772{
773 int ret;
774
775 port->num_clks = devm_clk_bulk_get_all(port->dev, &port->clks);
776 if (port->num_clks < 0) {
777 dev_notice(port->dev, "failed to get PCIe clock\n");
778 return port->num_clks;
779 }
780
781 ret = clk_bulk_prepare_enable(port->num_clks, port->clks);
782 if (ret) {
783 dev_notice(port->dev, "failed to enable PCIe clocks\n");
784 return ret;
785 }
786
787 return 0;
788}
789
/*
 * Power-up sequence: PHY reset release -> PHY power/init -> MAC reset
 * release -> runtime PM -> clocks. Both resets and the PHY are optional
 * (the devm_*_optional getters return NULL stubs when absent).
 * On failure every step already taken is unwound in reverse order.
 */
static int mtk_pcie_power_up(struct mtk_pcie_port *port)
{
	struct device *dev = port->dev;
	int err;

	port->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
	if (IS_ERR(port->phy_reset))
		return PTR_ERR(port->phy_reset);

	/* PHY power on and enable pipe clock */
	port->phy = devm_phy_optional_get(dev, "pcie-phy");
	if (IS_ERR(port->phy))
		return PTR_ERR(port->phy);

	reset_control_deassert(port->phy_reset);

	err = phy_power_on(port->phy);
	if (err) {
		dev_notice(dev, "failed to power on PCIe phy\n");
		goto err_phy_on;
	}

	err = phy_init(port->phy);
	if (err) {
		dev_notice(dev, "failed to initialize PCIe phy\n");
		goto err_phy_init;
	}

	port->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
	if (IS_ERR(port->mac_reset)) {
		err = PTR_ERR(port->mac_reset);
		goto err_mac_rst;
	}

	reset_control_deassert(port->mac_reset);

	/* MAC power on and enable transaction layer clocks */
	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	err = mtk_pcie_clk_init(port);
	if (err) {
		dev_notice(dev, "clock init failed\n");
		goto err_clk_init;
	}

	return 0;

	/* Unwind in strict reverse order of the steps above */
err_clk_init:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	reset_control_assert(port->mac_reset);
err_mac_rst:
	phy_exit(port->phy);
err_phy_init:
	phy_power_off(port->phy);
err_phy_on:
	reset_control_assert(port->phy_reset);

	return err;
}
851
/* Reverse of mtk_pcie_power_up(): clocks, runtime PM, MAC, then PHY. */
static void mtk_pcie_power_down(struct mtk_pcie_port *port)
{
	clk_bulk_disable_unprepare(port->num_clks, port->clks);

	pm_runtime_put_sync(port->dev);
	pm_runtime_disable(port->dev);
	reset_control_assert(port->mac_reset);

	phy_power_off(port->phy);
	phy_exit(port->phy);
	reset_control_assert(port->phy_reset);
}
864
/*
 * Parse DT resources, map the MAC register block, power the port up,
 * bring the link up and install the IRQ machinery.
 * Returns 0 on success; on failure the port is powered back down.
 */
static int mtk_pcie_setup(struct mtk_pcie_port *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
	struct list_head *windows = &host->windows;
	struct resource *regs, *bus;
	int err;

	err = pci_parse_request_of_pci_ranges(dev, windows, &bus);
	if (err)
		return err;

	port->busnr = bus->start;

	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
	port->base = devm_ioremap_resource(dev, regs);
	if (IS_ERR(port->base)) {
		dev_notice(dev, "failed to map register base\n");
		return PTR_ERR(port->base);
	}

	/* Physical base is needed to compute the MSI doorbell addresses */
	port->reg_base = regs->start;

	/* Don't touch the hardware registers before power up */
	err = mtk_pcie_power_up(port);
	if (err)
		return err;

	/* Try link up */
	err = mtk_pcie_startup_port(port);
	if (err) {
		dev_notice(dev, "PCIe startup failed\n");
		goto err_setup;
	}

	err = mtk_pcie_setup_irq(port, dev->of_node);
	if (err)
		goto err_setup;

	dev_info(dev, "PCIe link up success!\n");

	return 0;

err_setup:
	mtk_pcie_power_down(port);

	return err;
}
914
915static void release_io_range(struct device *dev)
916{
917 struct logic_pio_hwaddr *iorange = NULL;
918
919 iorange = find_io_range_by_fwnode(&dev->of_node->fwnode);
920 if (iorange) {
921 logic_pio_unregister_range(iorange);
922 kfree(iorange);
923 }
924}
925
/*
 * Platform probe: allocate the host bridge (port embedded as private
 * data), set the port up, then hand the bridge to the PCI core.
 * On any failure the parsed IO range and resource windows are released.
 */
static int mtk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_pcie_port *port;
	struct pci_host_bridge *host;
	int err;

	host = devm_pci_alloc_host_bridge(dev, sizeof(*port));
	if (!host)
		return -ENOMEM;

	port = pci_host_bridge_priv(host);

	port->dev = dev;
	platform_set_drvdata(pdev, port);

	err = mtk_pcie_setup(port);
	if (err)
		goto release_resource;

	host->busnr = port->busnr;
	host->dev.parent = port->dev;
	host->map_irq = of_irq_parse_and_map_pci;
	host->swizzle_irq = pci_common_swizzle;
	host->ops = &mtk_pcie_ops;
	host->sysdata = port;

	err = pci_host_probe(host);
	if (err) {
		/* Undo what mtk_pcie_setup() brought up */
		mtk_pcie_irq_teardown(port);
		mtk_pcie_power_down(port);
		goto release_resource;
	}

	return 0;

release_resource:
	release_io_range(dev);
	pci_free_resource_list(&host->windows);

	return err;
}
968
/* Platform remove: detach the root bus, then tear down IRQs and power. */
static int mtk_pcie_remove(struct platform_device *pdev)
{
	struct mtk_pcie_port *port = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(port);

	/* Remove the bus under the rescan/remove lock to serialize hotplug */
	pci_lock_rescan_remove();
	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	pci_unlock_rescan_remove();

	mtk_pcie_irq_teardown(port);
	mtk_pcie_power_down(port);

	return 0;
}
984
985static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_pcie_port *port)
986{
987 u32 val;
988
989 val = readl(port->base + PCIE_ICMD_PM_REG);
990 val |= PCIE_TURN_OFF_LINK;
991 writel(val, port->base + PCIE_ICMD_PM_REG);
992
993 /* Check the link is L2 */
994 return readl_poll_timeout(port->base + PCIE_LTSSM_STATUS_REG, val,
995 (PCIE_LTSSM_STATE(val) ==
996 PCIE_LTSSM_STATE_L2_IDLE), 20,
997 50 * USEC_PER_MSEC);
998}
999
/*
 * System suspend (noirq phase): put the link into L2, assert PERST#,
 * then gate the clocks and power the PHY off.
 */
static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
{
	struct mtk_pcie_port *port = dev_get_drvdata(dev);
	int err;
	u32 val;

	/* Trigger link to L2 state */
	err = mtk_pcie_turn_off_link(port);
	if (err) {
		dev_notice(port->dev, "can not enter L2 state\n");
		return err;
	}

	/* Assert the PERST# pin (drive it low towards the endpoint) */
	val = readl(port->base + PCIE_RST_CTRL_REG);
	val |= PCIE_PE_RSTB;
	writel(val, port->base + PCIE_RST_CTRL_REG);

	dev_dbg(port->dev, "enter L2 state success");

	clk_bulk_disable_unprepare(port->num_clks, port->clks);

	phy_power_off(port->phy);

	return 0;
}
1026
1027static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
1028{
1029 struct mtk_pcie_port *port = dev_get_drvdata(dev);
1030 int err;
1031
1032 phy_power_on(port->phy);
1033
1034 err = clk_bulk_prepare_enable(port->num_clks, port->clks);
1035 if (err) {
1036 dev_dbg(dev, "failed to enable PCIe clocks\n");
1037 return err;
1038 }
1039
1040 err = mtk_pcie_startup_port(port);
1041 if (err) {
1042 dev_notice(port->dev, "resume failed\n");
1043 return err;
1044 }
1045
1046 dev_dbg(port->dev, "resume done\n");
1047
1048 return 0;
1049}
1050
/* Suspend/resume run in the noirq phase so the link is quiesced last/first. */
static const struct dev_pm_ops mtk_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				      mtk_pcie_resume_noirq)
};

static const struct of_device_id mtk_pcie_of_match[] = {
	{ .compatible = "mediatek,mt8192-pcie" },
	{},
};

static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.remove = mtk_pcie_remove,
	.driver = {
		.name = "mtk-pcie",
		.of_match_table = mtk_pcie_of_match,
		.pm = &mtk_pcie_pm_ops,
	},
};

module_platform_driver(mtk_pcie_driver);
MODULE_LICENSE("GPL v2");