// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek PCIe host controller driver.
 *
 * Copyright (c) 2020 MediaTek Inc.
 * Author: Jianjun Wang <jianjun.wang@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "../pci.h"

#define PCIE_SETTING_REG		0x80
#define PCIE_PCI_IDS_1			0x9c
#define PCI_CLASS(class)		((class) << 8)
#define PCIE_RC_MODE			BIT(0)

#define PCIE_CFGNUM_REG			0x140
#define PCIE_CFG_DEVFN(devfn)		((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus)		(((bus) << 8) & GENMASK(15, 8))
#define PCIE_CFG_BYTE_EN(bytes)		(((bytes) << 16) & GENMASK(19, 16))
#define PCIE_CFG_FORCE_BYTE_EN		BIT(20)
#define PCIE_CFG_OFFSET_ADDR		0x1000
#define PCIE_CFG_HEADER(bus, devfn) \
	(PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))

#define PCIE_RST_CTRL_REG		0x148
#define PCIE_MAC_RSTB			BIT(0)
#define PCIE_PHY_RSTB			BIT(1)
#define PCIE_BRG_RSTB			BIT(2)
#define PCIE_PE_RSTB			BIT(3)

#define PCIE_LTSSM_STATUS_REG		0x150
#define PCIE_LTSSM_STATE_MASK		GENMASK(28, 24)
#define PCIE_LTSSM_STATE(val)		(((val) & PCIE_LTSSM_STATE_MASK) >> 24)
#define PCIE_LTSSM_STATE_L2_IDLE	0x14

#define PCIE_LINK_STATUS_REG		0x154
#define PCIE_PORT_LINKUP		BIT(8)

#define PCIE_MSI_GROUP_NUM		4
#define PCIE_MSI_SET_NUM		8
#define PCIE_MSI_IRQS_PER_SET		32
#define PCIE_MSI_IRQS_NUM \
	(PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)

#define PCIE_INT_ENABLE_REG		0x180
#define PCIE_MSI_ENABLE			GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
#define PCIE_MSI_SHIFT			8
#define PCIE_INTX_SHIFT			24
#define PCIE_INTX_ENABLE \
	GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)

#define PCIE_INT_STATUS_REG		0x184
#define PCIE_MSI_SET_ENABLE_REG		0x190
#define PCIE_MSI_SET_ENABLE		GENMASK(PCIE_MSI_SET_NUM - 1, 0)

#define PCIE_MSI_SET_BASE_REG		0xc00
#define PCIE_MSI_SET_OFFSET		0x10
#define PCIE_MSI_SET_STATUS_OFFSET	0x04
#define PCIE_MSI_SET_ENABLE_OFFSET	0x08
#define PCIE_MSI_SET_GRP1_ENABLE_OFFSET	0x0c

#define PCIE_MSI_SET_GRP2_ENABLE_OFFSET	0x1c0
#define PCIE_MSI_SET_GRP2_OFFSET	0x04

#define PCIE_MSI_SET_GRP3_ENABLE_OFFSET	0x1e0
#define PCIE_MSI_SET_GRP3_OFFSET	0x04

#define PCIE_MSI_SET_ADDR_HI_BASE	0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET	0x04

#define PCIE_ICMD_PM_REG		0x198
#define PCIE_TURN_OFF_LINK		BIT(4)

#define PCIE_TRANS_TABLE_BASE_REG	0x800
#define PCIE_ATR_SRC_ADDR_MSB_OFFSET	0x4
#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET	0x8
#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET	0xc
#define PCIE_ATR_TRSL_PARAM_OFFSET	0x10
#define PCIE_ATR_TLB_SET_OFFSET		0x20

#define PCIE_MAX_TRANS_TABLES		8
#define PCIE_ATR_EN			BIT(0)
#define PCIE_ATR_SIZE(size) \
	(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
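/*
 * Worked example (derived from the macro and its caller below): for a
 * 64 MiB window, fls(SZ_64M) - 1 = 26, so PCIE_ATR_SIZE(26) places 25 in
 * bits [6:1], i.e. the field encodes log2(window size) - 1.
 */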
#define PCIE_ATR_ID(id)			((id) & GENMASK(3, 0))
#define PCIE_ATR_TYPE_MEM		PCIE_ATR_ID(0)
#define PCIE_ATR_TYPE_IO		PCIE_ATR_ID(1)
#define PCIE_ATR_TLP_TYPE(type)		(((type) << 16) & GENMASK(18, 16))
#define PCIE_ATR_TLP_TYPE_MEM		PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO		PCIE_ATR_TLP_TYPE(2)

/**
 * enum mtk_msi_group_type - PCIe controller MSI group type
 * @group0_merge_msi: all MSIs are merged into group0
 * @group1_direct_msi: each MSI gets an independent IRQ via group1
 * @group_binding_msi: MSIs are bound to the groups given by the DT map table
 */
enum mtk_msi_group_type {
	group0_merge_msi,
	group1_direct_msi,
	group_binding_msi,
};

/**
 * struct mtk_msi_set - MSI information for each set
 * @base: IO mapped register base
 * @enable: IO mapped enable register address of each MSI group
 * @msg_addr: MSI message address
 * @saved_irq_state: IRQ enable state of each MSI group saved at suspend time
 */
struct mtk_msi_set {
	void __iomem *base;
	void __iomem *enable[PCIE_MSI_GROUP_NUM];
	phys_addr_t msg_addr;
	u32 saved_irq_state[PCIE_MSI_GROUP_NUM];
};

/**
 * struct mtk_pcie_irq - PCIe controller interrupt information
 * @irq: IRQ number
 * @group: MSI group serviced by this IRQ
 * @mapped_table: bitmap of MSI hwirqs mapped to this IRQ's group
 */
struct mtk_pcie_irq {
	int irq;
	int group;
	u32 mapped_table;
};

/**
 * struct mtk_pcie_port - PCIe port information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @reg_base: physical register base
 * @mac_reset: MAC reset control
 * @phy_reset: PHY reset control
 * @phy: PHY controller block
 * @clks: PCIe clocks
 * @num_clks: PCIe clocks count for this port
 * @max_link_width: max supported link width of the PCIe slot
 * @irq: PCIe controller interrupt number
 * @num_irqs: number of PCIe controller interrupts
 * @irqs: PCIe controller interrupts information
 * @saved_irq_state: IRQ enable state saved at suspend time
 * @irq_lock: lock protecting IRQ register access
 * @intx_domain: legacy INTx IRQ domain
 * @msi_domain: MSI IRQ domain
 * @msi_bottom_domain: MSI IRQ bottom domain
 * @msi_sets: MSI sets information
 * @msi_group_type: PCIe controller MSI group type
 * @lock: lock protecting IRQ bit map
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_pcie_port {
	struct device *dev;
	void __iomem *base;
	phys_addr_t reg_base;
	struct reset_control *mac_reset;
	struct reset_control *phy_reset;
	struct phy *phy;
	struct clk_bulk_data *clks;
	int num_clks;
	int max_link_width;

	int irq;
	int num_irqs;
	struct mtk_pcie_irq *irqs;
	u32 saved_irq_state;
	raw_spinlock_t irq_lock;
	struct irq_domain *intx_domain;
	struct irq_domain *msi_domain;
	struct irq_domain *msi_bottom_domain;
	struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
	enum mtk_msi_group_type msi_group_type;
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
};

/**
 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
 * @bus: PCI bus to query
 * @devfn: device/function number
 * @where: offset in config space
 * @size: data size in TLP header
 *
 * Set byte enable field and device information in configuration TLP header.
 */
static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
					int where, int size)
{
	struct mtk_pcie_port *port = bus->sysdata;
	int bytes;
	u32 val;

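	/*
	 * Build the 4-bit byte-enable mask: e.g. a 2-byte access at a word
	 * offset of 2 yields 0b0011 << 2 = 0b1100.
	 */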
	bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);

	val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
	      PCIE_CFG_HEADER(bus->number, devfn);

	writel_relaxed(val, port->base + PCIE_CFGNUM_REG);
}

static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
				      int where)
{
	struct mtk_pcie_port *port = bus->sysdata;

	return port->base + PCIE_CFG_OFFSET_ADDR + where;
}

static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

	return pci_generic_config_read32(bus, devfn, where, size, val);
}

static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

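	/*
	 * The hardware only performs 32-bit config writes (with byte
	 * enables), so shift sub-word data onto its byte lane first.
	 */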
	if (size <= 2)
		val <<= (where & 0x3) * 8;

	return pci_generic_config_write32(bus, devfn, where, 4, val);
}

static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};

/**
 * of_pci_get_max_link_width() - Find the link width limit of a device
 * @node: device tree node with the max link width information
 *
 * Look up the "max-link-width" property of the given device node.
 *
 * Return: the max link width from DT, or a negative value if the property
 * is not found or is invalid.
 */
static int of_pci_get_max_link_width(struct device_node *node)
{
	u32 max_link_width = 0;

	if (of_property_read_u32(node, "max-link-width", &max_link_width) ||
	    max_link_width == 0 || max_link_width > 2)
		return -EINVAL;

	return max_link_width;
}
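
/*
 * Illustrative device tree fragment limiting a port to a single lane
 * (node name and address are examples only):
 *
 *	pcie@11280000 {
 *		...
 *		max-link-width = <1>;
 *	};
 */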

static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port,
				    resource_size_t cpu_addr,
				    resource_size_t pci_addr,
				    resource_size_t size,
				    unsigned long type, int num)
{
	void __iomem *table;
	u32 val;

	if (num >= PCIE_MAX_TRANS_TABLES) {
		dev_err(port->dev, "not enough translation tables for addr: %#llx, limited to [%d]\n",
			(unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
		return -ENODEV;
	}

	table = port->base + PCIE_TRANS_TABLE_BASE_REG +
		num * PCIE_ATR_TLB_SET_OFFSET;

	writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
		       table);
	writel_relaxed(upper_32_bits(cpu_addr),
		       table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
	writel_relaxed(lower_32_bits(pci_addr),
		       table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
	writel_relaxed(upper_32_bits(pci_addr),
		       table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);

	if (type == IORESOURCE_IO)
		val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
	else
		val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;

	writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);

	return 0;
}

static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
{
	void __iomem *base = port->base + PCIE_MSI_SET_BASE_REG;
	int i;
	u32 val;

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &port->msi_sets[i];

		msi_set->base = base + i * PCIE_MSI_SET_OFFSET;
		msi_set->enable[0] = base + PCIE_MSI_SET_ENABLE_OFFSET +
				     i * PCIE_MSI_SET_OFFSET;
		msi_set->enable[1] = base + PCIE_MSI_SET_GRP1_ENABLE_OFFSET +
				     i * PCIE_MSI_SET_OFFSET;
		msi_set->enable[2] = base + PCIE_MSI_SET_GRP2_ENABLE_OFFSET +
				     i * PCIE_MSI_SET_GRP2_OFFSET;
		msi_set->enable[3] = base + PCIE_MSI_SET_GRP3_ENABLE_OFFSET +
				     i * PCIE_MSI_SET_GRP3_OFFSET;

		msi_set->msg_addr = port->reg_base + PCIE_MSI_SET_BASE_REG +
				    i * PCIE_MSI_SET_OFFSET;

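		/*
		 * The message address aliases this set's register window, so
		 * the MAC can capture inbound MSI memory writes and latch the
		 * written vector number into the set's status register (an
		 * inference from the MSI handler below).
		 */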
		/* Configure the MSI capture address */
		writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
		writel_relaxed(upper_32_bits(msi_set->msg_addr),
			       port->base + PCIE_MSI_SET_ADDR_HI_BASE +
			       i * PCIE_MSI_SET_ADDR_HI_OFFSET);
	}

	val = readl_relaxed(port->base + PCIE_MSI_SET_ENABLE_REG);
	val |= PCIE_MSI_SET_ENABLE;
	writel_relaxed(val, port->base + PCIE_MSI_SET_ENABLE_REG);

	val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
	val |= PCIE_MSI_ENABLE;
	writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
}

static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
	struct resource_entry *entry;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
	unsigned int table_index = 0;
	int err;
	u32 val;

	/* Set as RC mode */
	val = readl_relaxed(port->base + PCIE_SETTING_REG);
	val |= PCIE_RC_MODE;
	writel_relaxed(val, port->base + PCIE_SETTING_REG);

	/*
	 * Set the link width: bits [11:8] of PCIE_SETTING_REG select the
	 * lane count, 0 for x1 and BIT(8) for x2.
	 */
	val = readl_relaxed(port->base + PCIE_SETTING_REG);
	if (port->max_link_width == 1) {
		val &= ~GENMASK(11, 8);
	} else if (port->max_link_width == 2) {
		val &= ~GENMASK(11, 8);
		val |= BIT(8);
	}
	writel_relaxed(val, port->base + PCIE_SETTING_REG);

	/* Set class code */
	val = readl_relaxed(port->base + PCIE_PCI_IDS_1);
	val &= ~GENMASK(31, 8);
	val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8);
	writel_relaxed(val, port->base + PCIE_PCI_IDS_1);

	/* Mask all INTx interrupts */
	val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
	val &= ~PCIE_INTX_ENABLE;
	writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);

	/* Assert all reset signals */
	val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
	val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
	writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);

	/*
	 * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
	 * and 2.2.1 (Initial Power-Up (G3 to S0)).
	 * The deassertion of PERST# should be delayed 100ms (TPVPERL)
	 * for the power and clock to become stable.
	 */
	msleep(100);

	/* De-assert reset signals */
	val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
	writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);

	/* Check if the link is up or not */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_REG, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
	if (err) {
		val = readl_relaxed(port->base + PCIE_LTSSM_STATUS_REG);
		dev_err(port->dev, "PCIe link down, ltssm reg val: %#x\n", val);
		return err;
	}

	mtk_pcie_enable_msi(port);

	/* Set PCIe translation windows */
	resource_list_for_each_entry(entry, &host->windows) {
		struct resource *res = entry->res;
		unsigned long type = resource_type(res);
		resource_size_t cpu_addr;
		resource_size_t pci_addr;
		resource_size_t size;
		const char *range_type;

		if (type == IORESOURCE_IO) {
			cpu_addr = pci_pio_to_address(res->start);
			range_type = "IO";
		} else if (type == IORESOURCE_MEM) {
			cpu_addr = res->start;
			range_type = "MEM";
		} else {
			continue;
		}

		pci_addr = res->start - entry->offset;
		size = resource_size(res);
		err = mtk_pcie_set_trans_table(port, cpu_addr, pci_addr, size,
					       type, table_index);
		if (err)
			return err;

		dev_dbg(port->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
			range_type, table_index, (unsigned long long)cpu_addr,
			(unsigned long long)pci_addr, (unsigned long long)size);

		table_index++;
	}

	return 0;
}

static struct mtk_pcie_irq *mtk_msi_hwirq_get_irqs(struct mtk_pcie_port *port, unsigned long hwirq)
{
	int i;

	for (i = 0; i < port->num_irqs; i++)
		if (port->irqs[i].mapped_table & BIT(hwirq))
			return &port->irqs[i];

	return NULL;
}

static struct mtk_pcie_irq *mtk_msi_irq_get_irqs(struct mtk_pcie_port *port, unsigned int irq)
{
	int i;

	for (i = 0; i < port->num_irqs; i++)
		if (port->irqs[i].irq == irq)
			return &port->irqs[i];

	return NULL;
}

static int mtk_pcie_msi_set_affinity(struct irq_data *data,
				     const struct cpumask *mask, bool force)
{
	struct mtk_pcie_port *port = data->domain->host_data;
	struct irq_data *port_data;
	struct irq_chip *port_chip;
	struct mtk_pcie_irq *irqs;
	unsigned long hwirq;
	int ret;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
	irqs = mtk_msi_hwirq_get_irqs(port, hwirq);
	if (IS_ERR_OR_NULL(irqs))
		return -EINVAL;

	port_data = irq_get_irq_data(irqs->irq);
	port_chip = irq_data_get_irq_chip(port_data);
	if (!port_chip || !port_chip->irq_set_affinity)
		return -EINVAL;

	ret = port_chip->irq_set_affinity(port_data, mask, force);

	irq_data_update_effective_affinity(data, mask);

	return ret;
}

static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
	pci_msi_mask_irq(data);
	irq_chip_mask_parent(data);
}

static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
	irq_chip_unmask_parent(data);
}

static struct irq_chip mtk_msi_irq_chip = {
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = mtk_pcie_msi_irq_mask,
	.irq_unmask = mtk_pcie_msi_irq_unmask,
	.name = "MSI",
};

static struct msi_domain_info mtk_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip = &mtk_msi_irq_chip,
};

static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_pcie_port *port = data->domain->host_data;
	unsigned long hwirq;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	msg->address_hi = upper_32_bits(msi_set->msg_addr);
	msg->address_lo = lower_32_bits(msi_set->msg_addr);
	msg->data = hwirq;
	dev_dbg(port->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
		hwirq, msg->address_hi, msg->address_lo, msg->data);
}

static void mtk_msi_bottom_irq_ack(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

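	/* Ack by writing the hwirq bit; the status register appears to be
	 * write-1-to-clear.
	 */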
	writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
}

static void mtk_msi_bottom_irq_mask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_pcie_port *port = data->domain->host_data;
	struct mtk_pcie_irq *irqs;
	unsigned long hwirq, flags;
	u32 val;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
	irqs = mtk_msi_hwirq_get_irqs(port, hwirq);
	if (IS_ERR_OR_NULL(irqs))
		return;

	raw_spin_lock_irqsave(&port->irq_lock, flags);
	val = readl_relaxed(msi_set->enable[irqs->group]);
	val &= ~BIT(hwirq);
	writel_relaxed(val, msi_set->enable[irqs->group]);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}

static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_pcie_port *port = data->domain->host_data;
	struct mtk_pcie_irq *irqs;
	unsigned long hwirq, flags;
	u32 val;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
	irqs = mtk_msi_hwirq_get_irqs(port, hwirq);
	if (IS_ERR_OR_NULL(irqs))
		return;

	raw_spin_lock_irqsave(&port->irq_lock, flags);
	val = readl_relaxed(msi_set->enable[irqs->group]);
	val |= BIT(hwirq);
	writel_relaxed(val, msi_set->enable[irqs->group]);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}

static struct irq_chip mtk_msi_bottom_irq_chip = {
	.irq_ack = mtk_msi_bottom_irq_ack,
	.irq_mask = mtk_msi_bottom_irq_mask,
	.irq_unmask = mtk_msi_bottom_irq_unmask,
	.irq_compose_msi_msg = mtk_compose_msi_msg,
	.irq_set_affinity = mtk_pcie_msi_set_affinity,
	.name = "MSI",
};

static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *arg)
{
	struct mtk_pcie_port *port = domain->host_data;
	struct mtk_msi_set *msi_set;
	int i, hwirq, set_idx;

	mutex_lock(&port->lock);

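	/*
	 * Multi-MSI blocks must be naturally aligned powers of two, since
	 * the endpoint encodes the vector index in the low bits of the
	 * message data; bitmap_find_free_region() provides exactly that.
	 */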
	hwirq = bitmap_find_free_region(port->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
					order_base_2(nr_irqs));

	mutex_unlock(&port->lock);

	if (hwirq < 0)
		return -ENOSPC;

	set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
	msi_set = &port->msi_sets[set_idx];

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &mtk_msi_bottom_irq_chip, msi_set,
				    handle_edge_irq, NULL, NULL);

	return 0;
}

static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs)
{
	struct mtk_pcie_port *port = domain->host_data;
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);

	mutex_lock(&port->lock);

	bitmap_release_region(port->msi_irq_in_use, data->hwirq,
			      order_base_2(nr_irqs));

	mutex_unlock(&port->lock);

	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
	.alloc = mtk_msi_bottom_domain_alloc,
	.free = mtk_msi_bottom_domain_free,
};

static void mtk_intx_mask(struct irq_data *data)
{
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&port->irq_lock, flags);
	val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
	val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}

static void mtk_intx_unmask(struct irq_data *data)
{
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&port->irq_lock, flags);
	val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
	val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}

/**
 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
 * @data: pointer to chip specific data
 *
 * As an emulated level IRQ, its interrupt status will remain
 * until the corresponding de-assert message is received; hence the
 * status can only be cleared when the interrupt has been serviced.
 */
static void mtk_intx_eoi(struct irq_data *data)
{
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	hwirq = data->hwirq + PCIE_INTX_SHIFT;
	writel_relaxed(BIT(hwirq), port->base + PCIE_INT_STATUS_REG);
}

static int mtk_pcie_intx_set_affinity(struct irq_data *data,
				      const struct cpumask *mask, bool force)
{
	struct mtk_pcie_port *port = data->domain->host_data;
	struct irq_data *port_data;
	struct irq_chip *port_chip;
	int ret;

	port_data = irq_get_irq_data(port->irq);
	port_chip = irq_data_get_irq_chip(port_data);
	if (!port_chip || !port_chip->irq_set_affinity)
		return -EINVAL;
	ret = port_chip->irq_set_affinity(port_data, mask, force);
	irq_data_update_effective_affinity(data, mask);
	return ret;
}

static struct irq_chip mtk_intx_irq_chip = {
	.irq_mask = mtk_intx_mask,
	.irq_unmask = mtk_intx_unmask,
	.irq_eoi = mtk_intx_eoi,
	.irq_set_affinity = mtk_pcie_intx_set_affinity,
	.name = "INTx",
};

static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, domain->host_data);
	irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
				      handle_fasteoi_irq, "INTx");
	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};

static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port)
{
	struct device *dev = port->dev;
	struct device_node *intc_node, *node = dev->of_node;
	int ret;

	raw_spin_lock_init(&port->irq_lock);

	/* Setup INTx */
	intc_node = of_get_child_by_name(node, "interrupt-controller");
	if (!intc_node) {
		dev_err(dev, "missing interrupt-controller node\n");
		return -ENODEV;
	}

	port->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, port);
	if (!port->intx_domain) {
		dev_err(dev, "failed to create INTx IRQ domain\n");
		return -ENODEV;
	}

	/* Setup MSI */
	mutex_init(&port->lock);

	port->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
							&mtk_msi_bottom_domain_ops, port);
	if (!port->msi_bottom_domain) {
		dev_err(dev, "failed to create MSI bottom domain\n");
		ret = -ENODEV;
		goto err_msi_bottom_domain;
	}

	port->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
						     &mtk_msi_domain_info,
						     port->msi_bottom_domain);
	if (!port->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		ret = -ENODEV;
		goto err_msi_domain;
	}

	return 0;

err_msi_domain:
	irq_domain_remove(port->msi_bottom_domain);
err_msi_bottom_domain:
	irq_domain_remove(port->intx_domain);

	return ret;
}

static void mtk_pcie_irq_teardown(struct mtk_pcie_port *port)
{
	int i;

	for (i = 0; i < port->num_irqs; i++)
		irq_set_chained_handler_and_data(port->irqs[i].irq, NULL, NULL);

	if (port->intx_domain)
		irq_domain_remove(port->intx_domain);

	if (port->msi_domain)
		irq_domain_remove(port->msi_domain);

	if (port->msi_bottom_domain)
		irq_domain_remove(port->msi_bottom_domain);

	for (i = 0; i < port->num_irqs; i++)
		irq_dispose_mapping(port->irqs[i].irq);
}

static void mtk_pcie_msi_handler(struct irq_desc *desc, int set_idx)
{
	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
	struct mtk_msi_set *msi_set = &port->msi_sets[set_idx];
	struct mtk_pcie_irq *irqs;
	unsigned long msi_enable, msi_status;
	unsigned int virq;
	irq_hw_number_t bit, hwirq;

	irqs = mtk_msi_irq_get_irqs(port, irq_desc_get_irq(desc));
	if (IS_ERR_OR_NULL(irqs))
		return;

	msi_enable = readl_relaxed(msi_set->enable[irqs->group]);
	msi_enable &= irqs->mapped_table;
	if (!msi_enable)
		return;

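	/* Drain the status register; new MSIs may arrive while handling */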
	do {
		msi_status = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_STATUS_OFFSET);
		msi_status &= msi_enable;
		if (!msi_status)
			break;

		for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
			hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
			virq = irq_find_mapping(port->msi_bottom_domain, hwirq);
			generic_handle_irq(virq);
		}
	} while (true);
}

static void mtk_pcie_irq_handler(struct irq_desc *desc)
{
	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	unsigned int virq;
	irq_hw_number_t irq_bit;

	chained_irq_enter(irqchip, desc);

	status = readl_relaxed(port->base + PCIE_INT_STATUS_REG);

	/* INTx handler */
	irq_bit = PCIE_INTX_SHIFT;
	for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
			      PCIE_INTX_SHIFT) {
		virq = irq_find_mapping(port->intx_domain,
					irq_bit - PCIE_INTX_SHIFT);
		generic_handle_irq(virq);
	}

	/*
	 * Merged MSIs (group0) raise bits in INT_STATUS; the other groups
	 * don't, so their MSI set status registers are polled directly.
	 */
	if (port->msi_group_type == group0_merge_msi) {
		irq_bit = PCIE_MSI_SHIFT;
		for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
				      PCIE_MSI_SHIFT) {
			mtk_pcie_msi_handler(desc, irq_bit - PCIE_MSI_SHIFT);

			writel_relaxed(BIT(irq_bit), port->base +
				       PCIE_INT_STATUS_REG);
		}
	} else {
		for (irq_bit = PCIE_MSI_SHIFT; irq_bit < (PCIE_MSI_SET_NUM +
		     PCIE_MSI_SHIFT); irq_bit++) {
			mtk_pcie_msi_handler(desc, irq_bit - PCIE_MSI_SHIFT);

			writel_relaxed(BIT(irq_bit), port->base +
				       PCIE_INT_STATUS_REG);
		}
	}

	chained_irq_exit(irqchip, desc);
}

static int mtk_pcie_parse_msi(struct mtk_pcie_port *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	const char *msi_type;
	u32 mask_check = 0, *msimap;
	int count, err, i;

	/* Get MSI group type */
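	/*
	 * The optional "msi_type" DT string selects it, e.g.:
	 *
	 *	msi_type = "direct_msi";
	 *
	 * Anything else (or no property) falls back to merged MSI (group0).
	 */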
	port->msi_group_type = group0_merge_msi;
	if (!of_property_read_string(node, "msi_type", &msi_type)) {
		if (!strcmp(msi_type, "direct_msi"))
			port->msi_group_type = group1_direct_msi;
		if (!strcmp(msi_type, "binding_msi"))
			port->msi_group_type = group_binding_msi;
	}

	port->num_irqs = platform_irq_count(pdev);
	if (port->num_irqs < 0)
		return port->num_irqs;

	port->irqs = devm_kcalloc(dev, port->num_irqs,
				  sizeof(struct mtk_pcie_irq), GFP_KERNEL);
	if (!port->irqs)
		return -ENOMEM;

	/* Merged MSI (group0) doesn't need a map table */
	if (port->msi_group_type == group0_merge_msi) {
		port->irqs[0].group = 0;
		port->irqs[0].mapped_table = GENMASK(31, 0);

		return 0;
	}

	/* Parse the MSI map table from the DT */
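	/*
	 * The property holds <group bitmask> pairs, one per interrupt, e.g.:
	 *
	 *	msi-map = <0 0x0000ffff>, <1 0xffff0000>;
	 *
	 * (values illustrative). Groups must be below PCIE_MSI_GROUP_NUM and
	 * the per-set hwirq bitmasks must not overlap, which is enforced via
	 * mask_check below.
	 */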
	count = of_property_count_elems_of_size(node, "msi-map", sizeof(u32));
	if ((count <= 0) || (count % 2) || (count / 2 > port->num_irqs))
		return -EINVAL;
	msimap = devm_kcalloc(dev, count, sizeof(u32), GFP_KERNEL);
	if (!msimap)
		return -ENOMEM;

	err = of_property_read_u32_array(node, "msi-map", msimap, count);
	if (err)
		return err;

	for (i = 0; i < (count / 2); i++) {
		if ((msimap[i * 2] >= PCIE_MSI_GROUP_NUM) ||
		    (msimap[i * 2 + 1] & mask_check)) {
			return -EINVAL;
		}

		port->irqs[i].group = msimap[i * 2];
		port->irqs[i].mapped_table = msimap[i * 2 + 1];
		mask_check |= msimap[i * 2 + 1];
	}

	return 0;
}

static int mtk_pcie_setup_irq(struct mtk_pcie_port *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int err, i;

	err = mtk_pcie_init_irq_domains(port);
	if (err)
		return err;

	port->irq = platform_get_irq(pdev, 0);
	if (port->irq < 0)
		return port->irq;

	for (i = 0; i < port->num_irqs; i++) {
		port->irqs[i].irq = platform_get_irq(pdev, i);
		if (port->irqs[i].irq < 0)
			return port->irqs[i].irq;

		irq_set_chained_handler_and_data(port->irqs[i].irq,
						 mtk_pcie_irq_handler, port);
	}

	return 0;
}

static int mtk_pcie_parse_port(struct mtk_pcie_port *port)
{
	struct device *dev = port->dev;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
	struct platform_device *pdev = to_platform_device(dev);
	struct list_head *windows = &host->windows;
	struct resource *regs, *bus;
	int ret;

	ret = pci_parse_request_of_pci_ranges(dev, windows, &bus);
	if (ret) {
		dev_err(dev, "failed to parse pci ranges\n");
		return ret;
	}

	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
	port->base = devm_ioremap_resource(dev, regs);
	if (IS_ERR(port->base)) {
		dev_err(dev, "failed to map register base\n");
		return PTR_ERR(port->base);
	}

	port->reg_base = regs->start;

	port->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
	if (IS_ERR(port->phy_reset)) {
		ret = PTR_ERR(port->phy_reset);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get PHY reset\n");

		return ret;
	}

	port->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
	if (IS_ERR(port->mac_reset)) {
		ret = PTR_ERR(port->mac_reset);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get MAC reset\n");

		return ret;
	}

	port->phy = devm_phy_optional_get(dev, "pcie-phy");
	if (IS_ERR(port->phy)) {
		ret = PTR_ERR(port->phy);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get PHY\n");

		return ret;
	}

	port->num_clks = devm_clk_bulk_get_all(dev, &port->clks);
	if (port->num_clks < 0) {
		dev_err(dev, "failed to get clocks\n");
		return port->num_clks;
	}

	port->max_link_width = of_pci_get_max_link_width(dev->of_node);
	if (port->max_link_width < 0)
		dev_err(dev, "failed to get max link width\n");

	ret = mtk_pcie_parse_msi(port);
	if (ret) {
		dev_err(dev, "failed to parse msi\n");
		return ret;
	}

	return 0;
}

static int mtk_pcie_power_up(struct mtk_pcie_port *port)
{
	struct device *dev = port->dev;
	int err;

	/* PHY power on and enable pipe clock */
	reset_control_deassert(port->phy_reset);

	err = phy_init(port->phy);
	if (err) {
		dev_err(dev, "failed to initialize PHY\n");
		goto err_phy_init;
	}

	err = phy_power_on(port->phy);
	if (err) {
		dev_err(dev, "failed to power on PHY\n");
		goto err_phy_on;
	}

	/* MAC power on and enable transaction layer clocks */
	reset_control_deassert(port->mac_reset);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	err = clk_bulk_prepare_enable(port->num_clks, port->clks);
	if (err) {
		dev_err(dev, "failed to enable clocks\n");
		goto err_clk_init;
	}

	return 0;

err_clk_init:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	reset_control_assert(port->mac_reset);
	phy_power_off(port->phy);
err_phy_on:
	phy_exit(port->phy);
err_phy_init:
	reset_control_assert(port->phy_reset);

	return err;
}

static void mtk_pcie_power_down(struct mtk_pcie_port *port)
{
	clk_bulk_disable_unprepare(port->num_clks, port->clks);

	pm_runtime_put_sync(port->dev);
	pm_runtime_disable(port->dev);
	reset_control_assert(port->mac_reset);

	phy_power_off(port->phy);
	phy_exit(port->phy);
	reset_control_assert(port->phy_reset);
}

static int mtk_pcie_setup(struct mtk_pcie_port *port)
{
	int err;

	err = mtk_pcie_parse_port(port);
	if (err)
		return err;

	/* Don't touch the hardware registers before power up */
	err = mtk_pcie_power_up(port);
	if (err)
		return err;

	/* Try link up */
	err = mtk_pcie_startup_port(port);
	if (err)
		goto err_setup;

	err = mtk_pcie_setup_irq(port);
	if (err)
		goto err_setup;

	return 0;

err_setup:
	mtk_pcie_power_down(port);

	return err;
}

static int mtk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_pcie_port *port;
	struct pci_host_bridge *host;
	int err;

	host = devm_pci_alloc_host_bridge(dev, sizeof(*port));
	if (!host)
		return -ENOMEM;

	port = pci_host_bridge_priv(host);

	port->dev = dev;
	platform_set_drvdata(pdev, port);

	err = mtk_pcie_setup(port);
	if (err)
		return err;

	host->dev.parent = port->dev;
	host->ops = &mtk_pcie_ops;
	host->map_irq = of_irq_parse_and_map_pci;
	host->swizzle_irq = pci_common_swizzle;
	host->sysdata = port;

	err = pci_host_probe(host);
	if (err) {
		mtk_pcie_irq_teardown(port);
		mtk_pcie_power_down(port);
		return err;
	}

	return 0;
}

static int mtk_pcie_remove(struct platform_device *pdev)
{
	struct mtk_pcie_port *port = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(port);

	pci_lock_rescan_remove();
	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	pci_unlock_rescan_remove();

	mtk_pcie_irq_teardown(port);
	mtk_pcie_power_down(port);

	return 0;
}

static void __maybe_unused mtk_pcie_irq_save(struct mtk_pcie_port *port)
{
	int i, n;

	raw_spin_lock(&port->irq_lock);

	port->saved_irq_state = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &port->msi_sets[i];

		for (n = 0; n < PCIE_MSI_GROUP_NUM; n++)
			msi_set->saved_irq_state[n] = readl_relaxed(
					msi_set->enable[n]);
	}

	raw_spin_unlock(&port->irq_lock);
}

static void __maybe_unused mtk_pcie_irq_restore(struct mtk_pcie_port *port)
{
	int i, n;

	raw_spin_lock(&port->irq_lock);

	writel_relaxed(port->saved_irq_state, port->base + PCIE_INT_ENABLE_REG);

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &port->msi_sets[i];

		for (n = 0; n < PCIE_MSI_GROUP_NUM; n++)
			writel_relaxed(msi_set->saved_irq_state[n],
				       msi_set->enable[n]);
	}

	raw_spin_unlock(&port->irq_lock);
}

static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_pcie_port *port)
{
	u32 val;

	val = readl_relaxed(port->base + PCIE_ICMD_PM_REG);
	val |= PCIE_TURN_OFF_LINK;
	writel_relaxed(val, port->base + PCIE_ICMD_PM_REG);

	/* Check that the link has entered the L2 idle state */
	return readl_poll_timeout(port->base + PCIE_LTSSM_STATUS_REG, val,
				  (PCIE_LTSSM_STATE(val) ==
				   PCIE_LTSSM_STATE_L2_IDLE), 20,
				  50 * USEC_PER_MSEC);
}

static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
{
	struct mtk_pcie_port *port = dev_get_drvdata(dev);
	int err;
	u32 val;

	/* Trigger link to L2 state */
	err = mtk_pcie_turn_off_link(port);
	if (err) {
		dev_err(port->dev, "cannot enter L2 state\n");
		return err;
	}

	/* Pull down the PERST# pin */
	val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
	val |= PCIE_PE_RSTB;
	writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);

	dev_dbg(port->dev, "entered L2 state successfully\n");

	mtk_pcie_irq_save(port);
	mtk_pcie_power_down(port);

	return 0;
}

static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
{
	struct mtk_pcie_port *port = dev_get_drvdata(dev);
	int err;

	err = mtk_pcie_power_up(port);
	if (err)
		return err;

	err = mtk_pcie_startup_port(port);
	if (err) {
		mtk_pcie_power_down(port);
		return err;
	}

	mtk_pcie_irq_restore(port);

	return 0;
}

static const struct dev_pm_ops mtk_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				      mtk_pcie_resume_noirq)
};

static const struct of_device_id mtk_pcie_of_match[] = {
	{ .compatible = "mediatek,mt8192-pcie" },
	{ .compatible = "mediatek,mt7986-pcie" },
	{},
};

static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.remove = mtk_pcie_remove,
	.driver = {
		.name = "mtk-pcie",
		.of_match_table = mtk_pcie_of_match,
		.pm = &mtk_pcie_pm_ops,
	},
};

module_platform_driver(mtk_pcie_driver);
MODULE_LICENSE("GPL v2");