blob: 0654b4c9112959fd4327a54d793b5820f3f1889c [file] [log] [blame]
developerfd40db22021-04-29 10:08:25 +08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * MediaTek PCIe host controller driver.
4 *
5 * Copyright (c) 2020 MediaTek Inc.
6 * Author: Jianjun Wang <jianjun.wang@mediatek.com>
7 */
8
9#include <linux/clk.h>
10#include <linux/delay.h>
11#include <linux/iopoll.h>
12#include <linux/irq.h>
13#include <linux/irqchip/chained_irq.h>
14#include <linux/irqdomain.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/msi.h>
developerfd40db22021-04-29 10:08:25 +080018#include <linux/of_pci.h>
developerfd40db22021-04-29 10:08:25 +080019#include <linux/pci.h>
20#include <linux/phy/phy.h>
21#include <linux/platform_device.h>
22#include <linux/pm_domain.h>
23#include <linux/pm_runtime.h>
24#include <linux/reset.h>
25
26#include "../pci.h"
27
/* Register map: per-port MAC registers, offsets from port->base. */
#define PCIE_SETTING_REG		0x80
#define PCIE_PCI_IDS_1			0x9c
/* Place a 24-bit class code value into bits [31:8] of PCIE_PCI_IDS_1 */
#define PCI_CLASS(class)		((class) << 8)
#define PCIE_RC_MODE			BIT(0)

#define PCIE_CFGNUM_REG			0x140
#define PCIE_CFG_DEVFN(devfn)		((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus)		(((bus) << 8) & GENMASK(15, 8))
#define PCIE_CFG_BYTE_EN(bytes)		(((bytes) << 16) & GENMASK(19, 16))
#define PCIE_CFG_FORCE_BYTE_EN		BIT(20)
#define PCIE_CFG_OFFSET_ADDR		0x1000
#define PCIE_CFG_HEADER(bus, devfn) \
	(PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))

#define PCIE_RST_CTRL_REG		0x148
#define PCIE_MAC_RSTB			BIT(0)
#define PCIE_PHY_RSTB			BIT(1)
#define PCIE_BRG_RSTB			BIT(2)
#define PCIE_PE_RSTB			BIT(3)

#define PCIE_LTSSM_STATUS_REG		0x150
#define PCIE_LTSSM_STATE_MASK		GENMASK(28, 24)
#define PCIE_LTSSM_STATE(val)		(((val) & PCIE_LTSSM_STATE_MASK) >> 24)
#define PCIE_LTSSM_STATE_L2_IDLE	0x14

#define PCIE_LINK_STATUS_REG		0x154
#define PCIE_PORT_LINKUP		BIT(8)

#define PCIE_MSI_SET_NUM		8
#define PCIE_MSI_IRQS_PER_SET		32
#define PCIE_MSI_IRQS_NUM \
	(PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)

#define PCIE_INT_ENABLE_REG		0x180
#define PCIE_MSI_ENABLE			GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
#define PCIE_MSI_SHIFT			8
#define PCIE_INTX_SHIFT			24
#define PCIE_INTX_ENABLE \
	GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)

#define PCIE_INT_STATUS_REG		0x184
#define PCIE_MSI_SET_ENABLE_REG		0x190
#define PCIE_MSI_SET_ENABLE		GENMASK(PCIE_MSI_SET_NUM - 1, 0)

#define PCIE_MSI_SET_BASE_REG		0xc00
#define PCIE_MSI_SET_OFFSET		0x10
#define PCIE_MSI_SET_STATUS_OFFSET	0x04
#define PCIE_MSI_SET_ENABLE_OFFSET	0x08

#define PCIE_MSI_SET_ADDR_HI_BASE	0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET	0x04

#define PCIE_ICMD_PM_REG		0x198
#define PCIE_TURN_OFF_LINK		BIT(4)

/* Address translation (ATR) table: one 0x20-byte entry per window */
#define PCIE_TRANS_TABLE_BASE_REG	0x800
#define PCIE_ATR_SRC_ADDR_MSB_OFFSET	0x4
#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET	0x8
#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET	0xc
#define PCIE_ATR_TRSL_PARAM_OFFSET	0x10
#define PCIE_ATR_TLB_SET_OFFSET		0x20

#define PCIE_MAX_TRANS_TABLES		8
#define PCIE_ATR_EN			BIT(0)
/* Window size is encoded as a power of two: (log2(size) - 1) in bits [6:1] */
#define PCIE_ATR_SIZE(size) \
	(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
#define PCIE_ATR_ID(id)			((id) & GENMASK(3, 0))
#define PCIE_ATR_TYPE_MEM		PCIE_ATR_ID(0)
#define PCIE_ATR_TYPE_IO		PCIE_ATR_ID(1)
#define PCIE_ATR_TLP_TYPE(type)		(((type) << 16) & GENMASK(18, 16))
#define PCIE_ATR_TLP_TYPE_MEM		PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO		PCIE_ATR_TLP_TYPE(2)
100
/**
 * struct mtk_msi_set - MSI information for each set
 * @base: IO mapped register base of this set's control registers
 * @msg_addr: MSI message (capture) address programmed into endpoints
 * @saved_irq_state: IRQ enable state saved at suspend time
 */
struct mtk_msi_set {
	void __iomem *base;
	phys_addr_t msg_addr;
	u32 saved_irq_state;
};
112
/**
 * struct mtk_pcie_port - PCIe port information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @reg_base: physical register base (used to derive MSI capture addresses)
 * @mac_reset: MAC reset control
 * @phy_reset: PHY reset control
 * @phy: PHY controller block
 * @clks: PCIe clocks
 * @num_clks: PCIe clocks count for this port
 * @irq: PCIe controller interrupt number
 * @saved_irq_state: IRQ enable state saved at suspend time
 * @irq_lock: lock protecting IRQ register access
 * @intx_domain: legacy INTx IRQ domain
 * @msi_domain: MSI IRQ domain
 * @msi_bottom_domain: MSI IRQ bottom domain
 * @msi_sets: MSI sets information
 * @lock: lock protecting IRQ bit map
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_pcie_port {
	struct device *dev;
	void __iomem *base;
	phys_addr_t reg_base;
	struct reset_control *mac_reset;
	struct reset_control *phy_reset;
	struct phy *phy;
	struct clk_bulk_data *clks;
	int num_clks;

	int irq;
	u32 saved_irq_state;
	raw_spinlock_t irq_lock;
	struct irq_domain *intx_domain;
	struct irq_domain *msi_domain;
	struct irq_domain *msi_bottom_domain;
	struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
};
153
154/**
developer44e30b02021-07-02 11:12:14 +0800155 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
developerfd40db22021-04-29 10:08:25 +0800156 * @bus: PCI bus to query
157 * @devfn: device/function number
158 * @where: offset in config space
159 * @size: data size in TLP header
160 *
161 * Set byte enable field and device information in configuration TLP header.
162 */
163static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
164 int where, int size)
165{
166 struct mtk_pcie_port *port = bus->sysdata;
167 int bytes;
168 u32 val;
169
170 bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);
171
172 val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
173 PCIE_CFG_HEADER(bus->number, devfn);
174
developer44e30b02021-07-02 11:12:14 +0800175 writel_relaxed(val, port->base + PCIE_CFGNUM_REG);
developerfd40db22021-04-29 10:08:25 +0800176}
177
178static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
179 int where)
180{
181 struct mtk_pcie_port *port = bus->sysdata;
182
183 return port->base + PCIE_CFG_OFFSET_ADDR + where;
184}
185
186static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
187 int where, int size, u32 *val)
188{
189 mtk_pcie_config_tlp_header(bus, devfn, where, size);
190
191 return pci_generic_config_read32(bus, devfn, where, size, val);
192}
193
/*
 * Config write: the byte enables programmed into the TLP header select which
 * bytes of the 32-bit access take effect, so sub-word values only need to be
 * shifted into position within the aligned word before the full-word write.
 */
static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

	/* Move the 1- or 2-byte value to its byte lane within the word */
	if (size <= 2)
		val <<= (where & 0x3) * 8;

	return pci_generic_config_write32(bus, devfn, where, 4, val);
}
204
/* Config-space accessors wired into the generic PCI core */
static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};
210
/*
 * Program one address translation (ATR) table entry mapping a CPU address
 * window onto PCI space. Returns 0 on success, -ENODEV when all
 * PCIE_MAX_TRANS_TABLES entries are already in use.
 */
static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port,
				    resource_size_t cpu_addr,
				    resource_size_t pci_addr,
				    resource_size_t size,
				    unsigned long type, int num)
{
	void __iomem *table;
	u32 val;

	if (num >= PCIE_MAX_TRANS_TABLES) {
		dev_err(port->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
			(unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
		return -ENODEV;
	}

	table = port->base + PCIE_TRANS_TABLE_BASE_REG +
		num * PCIE_ATR_TLB_SET_OFFSET;

	/*
	 * NOTE(review): PCIE_ATR_SIZE() encodes the window size as a power of
	 * two via fls(size) - 1, which silently rounds down non-power-of-2
	 * sizes — confirm callers only pass power-of-2 window sizes.
	 */
	writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
		       table);
	writel_relaxed(upper_32_bits(cpu_addr),
		       table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
	writel_relaxed(lower_32_bits(pci_addr),
		       table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
	writel_relaxed(upper_32_bits(pci_addr),
		       table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);

	/* Select translation type and TLP type for the window */
	if (type == IORESOURCE_IO)
		val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
	else
		val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;

	writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);

	return 0;
}
247
developer44e30b02021-07-02 11:12:14 +0800248static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
249{
250 int i;
251 u32 val;
252
253 for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
254 struct mtk_msi_set *msi_set = &port->msi_sets[i];
255
256 msi_set->base = port->base + PCIE_MSI_SET_BASE_REG +
257 i * PCIE_MSI_SET_OFFSET;
258 msi_set->msg_addr = port->reg_base + PCIE_MSI_SET_BASE_REG +
259 i * PCIE_MSI_SET_OFFSET;
260
261 /* Configure the MSI capture address */
262 writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
263 writel_relaxed(upper_32_bits(msi_set->msg_addr),
264 port->base + PCIE_MSI_SET_ADDR_HI_BASE +
265 i * PCIE_MSI_SET_ADDR_HI_OFFSET);
266 }
267
268 val = readl_relaxed(port->base + PCIE_MSI_SET_ENABLE_REG);
269 val |= PCIE_MSI_SET_ENABLE;
270 writel_relaxed(val, port->base + PCIE_MSI_SET_ENABLE_REG);
271
272 val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
273 val |= PCIE_MSI_ENABLE;
274 writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
275}
276
developerfd40db22021-04-29 10:08:25 +0800277static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
278{
279 struct resource_entry *entry;
280 struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
281 unsigned int table_index = 0;
282 int err;
283 u32 val;
284
285 /* Set as RC mode */
developer44e30b02021-07-02 11:12:14 +0800286 val = readl_relaxed(port->base + PCIE_SETTING_REG);
developerfd40db22021-04-29 10:08:25 +0800287 val |= PCIE_RC_MODE;
developer44e30b02021-07-02 11:12:14 +0800288 writel_relaxed(val, port->base + PCIE_SETTING_REG);
developerfd40db22021-04-29 10:08:25 +0800289
290 /* Set class code */
developer44e30b02021-07-02 11:12:14 +0800291 val = readl_relaxed(port->base + PCIE_PCI_IDS_1);
developerfd40db22021-04-29 10:08:25 +0800292 val &= ~GENMASK(31, 8);
293 val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8);
developer44e30b02021-07-02 11:12:14 +0800294 writel_relaxed(val, port->base + PCIE_PCI_IDS_1);
295
296 /* Mask all INTx interrupts */
297 val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
298 val &= ~PCIE_INTX_ENABLE;
299 writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
developerfd40db22021-04-29 10:08:25 +0800300
301 /* Assert all reset signals */
developer44e30b02021-07-02 11:12:14 +0800302 val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
developerfd40db22021-04-29 10:08:25 +0800303 val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
developer44e30b02021-07-02 11:12:14 +0800304 writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
developerfd40db22021-04-29 10:08:25 +0800305
developer44e30b02021-07-02 11:12:14 +0800306 /*
307 * Described in PCIe CEM specification setctions 2.2 (PERST# Signal)
308 * and 2.2.1 (Initial Power-Up (G3 to S0)).
309 * The deassertion of PERST# should be delayed 100ms (TPVPERL)
310 * for the power and clock to become stable.
311 */
312 msleep(100);
developerfd40db22021-04-29 10:08:25 +0800313
developer44e30b02021-07-02 11:12:14 +0800314 /* De-assert reset signals */
315 val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
316 writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
developerfd40db22021-04-29 10:08:25 +0800317
318 /* Check if the link is up or not */
319 err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_REG, val,
developer44e30b02021-07-02 11:12:14 +0800320 !!(val & PCIE_PORT_LINKUP), 20,
321 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
developerfd40db22021-04-29 10:08:25 +0800322 if (err) {
developer44e30b02021-07-02 11:12:14 +0800323 val = readl_relaxed(port->base + PCIE_LTSSM_STATUS_REG);
324 dev_err(port->dev, "PCIe link down, ltssm reg val: %#x\n", val);
developerfd40db22021-04-29 10:08:25 +0800325 return err;
326 }
327
developer44e30b02021-07-02 11:12:14 +0800328 mtk_pcie_enable_msi(port);
329
developerfd40db22021-04-29 10:08:25 +0800330 /* Set PCIe translation windows */
331 resource_list_for_each_entry(entry, &host->windows) {
332 struct resource *res = entry->res;
333 unsigned long type = resource_type(res);
334 resource_size_t cpu_addr;
335 resource_size_t pci_addr;
336 resource_size_t size;
337 const char *range_type;
338
339 if (type == IORESOURCE_IO) {
340 cpu_addr = pci_pio_to_address(res->start);
341 range_type = "IO";
342 } else if (type == IORESOURCE_MEM) {
343 cpu_addr = res->start;
344 range_type = "MEM";
345 } else {
346 continue;
347 }
348
349 pci_addr = res->start - entry->offset;
350 size = resource_size(res);
351 err = mtk_pcie_set_trans_table(port, cpu_addr, pci_addr, size,
352 type, table_index);
353 if (err)
354 return err;
355
356 dev_dbg(port->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
developer44e30b02021-07-02 11:12:14 +0800357 range_type, table_index, (unsigned long long)cpu_addr,
358 (unsigned long long)pci_addr, (unsigned long long)size);
developerfd40db22021-04-29 10:08:25 +0800359
360 table_index++;
361 }
362
363 return 0;
364}
365
developerfd40db22021-04-29 10:08:25 +0800366static int mtk_pcie_set_affinity(struct irq_data *data,
367 const struct cpumask *mask, bool force)
368{
developer44e30b02021-07-02 11:12:14 +0800369 return -EINVAL;
370}
developerfd40db22021-04-29 10:08:25 +0800371
developer44e30b02021-07-02 11:12:14 +0800372static void mtk_pcie_msi_irq_mask(struct irq_data *data)
373{
374 pci_msi_mask_irq(data);
375 irq_chip_mask_parent(data);
376}
developerfd40db22021-04-29 10:08:25 +0800377
developer44e30b02021-07-02 11:12:14 +0800378static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
379{
380 pci_msi_unmask_irq(data);
381 irq_chip_unmask_parent(data);
382}
developerfd40db22021-04-29 10:08:25 +0800383
developer44e30b02021-07-02 11:12:14 +0800384static struct irq_chip mtk_msi_irq_chip = {
385 .irq_ack = irq_chip_ack_parent,
386 .irq_mask = mtk_pcie_msi_irq_mask,
387 .irq_unmask = mtk_pcie_msi_irq_unmask,
388 .name = "MSI",
389};
developerfd40db22021-04-29 10:08:25 +0800390
developer44e30b02021-07-02 11:12:14 +0800391static struct msi_domain_info mtk_msi_domain_info = {
392 .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
393 MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
394 .chip = &mtk_msi_irq_chip,
395};
developerfd40db22021-04-29 10:08:25 +0800396
/*
 * Compose the MSI message for a hwirq: the address is the owning set's
 * capture address, the data is the hwirq's index within its set (0..31).
 */
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_pcie_port *port = data->domain->host_data;
	unsigned long hwirq;

	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;

	msg->address_hi = upper_32_bits(msi_set->msg_addr);
	msg->address_lo = lower_32_bits(msi_set->msg_addr);
	msg->data = hwirq;
	dev_dbg(port->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
		hwirq, msg->address_hi, msg->address_lo, msg->data);
}
411
developer44e30b02021-07-02 11:12:14 +0800412static void mtk_msi_bottom_irq_ack(struct irq_data *data)
developerfd40db22021-04-29 10:08:25 +0800413{
developer44e30b02021-07-02 11:12:14 +0800414 struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
developerfd40db22021-04-29 10:08:25 +0800415 unsigned long hwirq;
416
developerfd40db22021-04-29 10:08:25 +0800417 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
418
developer44e30b02021-07-02 11:12:14 +0800419 writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
developerfd40db22021-04-29 10:08:25 +0800420}
421
developer44e30b02021-07-02 11:12:14 +0800422static void mtk_msi_bottom_irq_mask(struct irq_data *data)
developerfd40db22021-04-29 10:08:25 +0800423{
developer44e30b02021-07-02 11:12:14 +0800424 struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
425 struct mtk_pcie_port *port = data->domain->host_data;
426 unsigned long hwirq, flags;
developerfd40db22021-04-29 10:08:25 +0800427 u32 val;
428
developerfd40db22021-04-29 10:08:25 +0800429 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
430
developer44e30b02021-07-02 11:12:14 +0800431 raw_spin_lock_irqsave(&port->irq_lock, flags);
432 val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
developerfd40db22021-04-29 10:08:25 +0800433 val &= ~BIT(hwirq);
developer44e30b02021-07-02 11:12:14 +0800434 writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
435 raw_spin_unlock_irqrestore(&port->irq_lock, flags);
developerfd40db22021-04-29 10:08:25 +0800436}
437
developer44e30b02021-07-02 11:12:14 +0800438static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
developerfd40db22021-04-29 10:08:25 +0800439{
developer44e30b02021-07-02 11:12:14 +0800440 struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
441 struct mtk_pcie_port *port = data->domain->host_data;
442 unsigned long hwirq, flags;
developerfd40db22021-04-29 10:08:25 +0800443 u32 val;
444
developerfd40db22021-04-29 10:08:25 +0800445 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
446
developer44e30b02021-07-02 11:12:14 +0800447 raw_spin_lock_irqsave(&port->irq_lock, flags);
448 val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
developerfd40db22021-04-29 10:08:25 +0800449 val |= BIT(hwirq);
developer44e30b02021-07-02 11:12:14 +0800450 writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
451 raw_spin_unlock_irqrestore(&port->irq_lock, flags);
developerfd40db22021-04-29 10:08:25 +0800452}
453
developer44e30b02021-07-02 11:12:14 +0800454static struct irq_chip mtk_msi_bottom_irq_chip = {
455 .irq_ack = mtk_msi_bottom_irq_ack,
456 .irq_mask = mtk_msi_bottom_irq_mask,
457 .irq_unmask = mtk_msi_bottom_irq_unmask,
developerfd40db22021-04-29 10:08:25 +0800458 .irq_compose_msi_msg = mtk_compose_msi_msg,
developerfd40db22021-04-29 10:08:25 +0800459 .irq_set_affinity = mtk_pcie_set_affinity,
developer44e30b02021-07-02 11:12:14 +0800460 .name = "MSI",
developerfd40db22021-04-29 10:08:25 +0800461};
462
developer44e30b02021-07-02 11:12:14 +0800463static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
464 unsigned int virq, unsigned int nr_irqs,
465 void *arg)
developerfd40db22021-04-29 10:08:25 +0800466{
developer44e30b02021-07-02 11:12:14 +0800467 struct mtk_pcie_port *port = domain->host_data;
468 struct mtk_msi_set *msi_set;
469 int i, hwirq, set_idx;
developerfd40db22021-04-29 10:08:25 +0800470
471 mutex_lock(&port->lock);
472
473 hwirq = bitmap_find_free_region(port->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
developer44e30b02021-07-02 11:12:14 +0800474 order_base_2(nr_irqs));
developerfd40db22021-04-29 10:08:25 +0800475
476 mutex_unlock(&port->lock);
477
developer44e30b02021-07-02 11:12:14 +0800478 if (hwirq < 0)
479 return -ENOSPC;
developerfd40db22021-04-29 10:08:25 +0800480
developer44e30b02021-07-02 11:12:14 +0800481 set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
482 msi_set = &port->msi_sets[set_idx];
developerfd40db22021-04-29 10:08:25 +0800483
developer44e30b02021-07-02 11:12:14 +0800484 for (i = 0; i < nr_irqs; i++)
485 irq_domain_set_info(domain, virq + i, hwirq + i,
486 &mtk_msi_bottom_irq_chip, msi_set,
487 handle_edge_irq, NULL, NULL);
developerfd40db22021-04-29 10:08:25 +0800488
developer44e30b02021-07-02 11:12:14 +0800489 return 0;
developerfd40db22021-04-29 10:08:25 +0800490}
491
developer44e30b02021-07-02 11:12:14 +0800492static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
493 unsigned int virq, unsigned int nr_irqs)
developerfd40db22021-04-29 10:08:25 +0800494{
developer44e30b02021-07-02 11:12:14 +0800495 struct mtk_pcie_port *port = domain->host_data;
496 struct irq_data *data = irq_domain_get_irq_data(domain, virq);
developerfd40db22021-04-29 10:08:25 +0800497
developer44e30b02021-07-02 11:12:14 +0800498 mutex_lock(&port->lock);
developerfd40db22021-04-29 10:08:25 +0800499
developer44e30b02021-07-02 11:12:14 +0800500 bitmap_release_region(port->msi_irq_in_use, data->hwirq,
501 order_base_2(nr_irqs));
developerfd40db22021-04-29 10:08:25 +0800502
developer44e30b02021-07-02 11:12:14 +0800503 mutex_unlock(&port->lock);
developerfd40db22021-04-29 10:08:25 +0800504
developer44e30b02021-07-02 11:12:14 +0800505 irq_domain_free_irqs_common(domain, virq, nr_irqs);
developerfd40db22021-04-29 10:08:25 +0800506}
507
developer44e30b02021-07-02 11:12:14 +0800508static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
509 .alloc = mtk_msi_bottom_domain_alloc,
510 .free = mtk_msi_bottom_domain_free,
developerfd40db22021-04-29 10:08:25 +0800511};
512
/* Mask one INTx source in the port's interrupt enable register (locked RMW) */
static void mtk_intx_mask(struct irq_data *data)
{
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&port->irq_lock, flags);
	val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
	val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}

/* Unmask one INTx source in the port's interrupt enable register (locked RMW) */
static void mtk_intx_unmask(struct irq_data *data)
{
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&port->irq_lock, flags);
	val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
	val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}
538
developer44e30b02021-07-02 11:12:14 +0800539/**
540 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
541 * @data: pointer to chip specific data
542 *
543 * As an emulated level IRQ, its interrupt status will remain
544 * until the corresponding de-assert message is received; hence that
545 * the status can only be cleared when the interrupt has been serviced.
546 */
developerfd40db22021-04-29 10:08:25 +0800547static void mtk_intx_eoi(struct irq_data *data)
548{
549 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
550 unsigned long hwirq;
551
developerfd40db22021-04-29 10:08:25 +0800552 hwirq = data->hwirq + PCIE_INTX_SHIFT;
developer44e30b02021-07-02 11:12:14 +0800553 writel_relaxed(BIT(hwirq), port->base + PCIE_INT_STATUS_REG);
developerfd40db22021-04-29 10:08:25 +0800554}
555
556static struct irq_chip mtk_intx_irq_chip = {
557 .irq_mask = mtk_intx_mask,
558 .irq_unmask = mtk_intx_unmask,
559 .irq_eoi = mtk_intx_eoi,
560 .irq_set_affinity = mtk_pcie_set_affinity,
developer44e30b02021-07-02 11:12:14 +0800561 .name = "INTx",
developerfd40db22021-04-29 10:08:25 +0800562};
563
564static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
565 irq_hw_number_t hwirq)
566{
developer44e30b02021-07-02 11:12:14 +0800567 irq_set_chip_data(irq, domain->host_data);
developerfd40db22021-04-29 10:08:25 +0800568 irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
569 handle_fasteoi_irq, "INTx");
developerfd40db22021-04-29 10:08:25 +0800570 return 0;
571}
572
573static const struct irq_domain_ops intx_domain_ops = {
574 .map = mtk_pcie_intx_map,
575};
576
developer44e30b02021-07-02 11:12:14 +0800577static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port)
developerfd40db22021-04-29 10:08:25 +0800578{
579 struct device *dev = port->dev;
developer44e30b02021-07-02 11:12:14 +0800580 struct device_node *intc_node, *node = dev->of_node;
581 int ret;
582
583 raw_spin_lock_init(&port->irq_lock);
developerfd40db22021-04-29 10:08:25 +0800584
585 /* Setup INTx */
586 intc_node = of_get_child_by_name(node, "interrupt-controller");
587 if (!intc_node) {
developer44e30b02021-07-02 11:12:14 +0800588 dev_err(dev, "missing interrupt-controller node\n");
developerfd40db22021-04-29 10:08:25 +0800589 return -ENODEV;
590 }
591
592 port->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
593 &intx_domain_ops, port);
594 if (!port->intx_domain) {
developer44e30b02021-07-02 11:12:14 +0800595 dev_err(dev, "failed to create INTx IRQ domain\n");
developerfd40db22021-04-29 10:08:25 +0800596 return -ENODEV;
597 }
598
599 /* Setup MSI */
600 mutex_init(&port->lock);
601
developer44e30b02021-07-02 11:12:14 +0800602 port->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
603 &mtk_msi_bottom_domain_ops, port);
604 if (!port->msi_bottom_domain) {
605 dev_err(dev, "failed to create MSI bottom domain\n");
developerfd40db22021-04-29 10:08:25 +0800606 ret = -ENODEV;
developer44e30b02021-07-02 11:12:14 +0800607 goto err_msi_bottom_domain;
developerfd40db22021-04-29 10:08:25 +0800608 }
609
developer44e30b02021-07-02 11:12:14 +0800610 port->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
611 &mtk_msi_domain_info,
612 port->msi_bottom_domain);
613 if (!port->msi_domain) {
614 dev_err(dev, "failed to create MSI domain\n");
developerfd40db22021-04-29 10:08:25 +0800615 ret = -ENODEV;
developer44e30b02021-07-02 11:12:14 +0800616 goto err_msi_domain;
developerfd40db22021-04-29 10:08:25 +0800617 }
618
619 return 0;
620
developerfd40db22021-04-29 10:08:25 +0800621err_msi_domain:
developer44e30b02021-07-02 11:12:14 +0800622 irq_domain_remove(port->msi_bottom_domain);
623err_msi_bottom_domain:
developerfd40db22021-04-29 10:08:25 +0800624 irq_domain_remove(port->intx_domain);
625
626 return ret;
627}
628
/*
 * Undo mtk_pcie_setup_irq()/mtk_pcie_init_irq_domains(): detach the chained
 * handler, tear down whichever domains were created, and dispose the mapping
 * of the muxed controller interrupt.
 */
static void mtk_pcie_irq_teardown(struct mtk_pcie_port *port)
{
	irq_set_chained_handler_and_data(port->irq, NULL, NULL);

	if (port->intx_domain)
		irq_domain_remove(port->intx_domain);

	if (port->msi_domain)
		irq_domain_remove(port->msi_domain);

	if (port->msi_bottom_domain)
		irq_domain_remove(port->msi_bottom_domain);

	irq_dispose_mapping(port->irq);
}
644
developer44e30b02021-07-02 11:12:14 +0800645static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx)
646{
647 struct mtk_msi_set *msi_set = &port->msi_sets[set_idx];
648 unsigned long msi_enable, msi_status;
649 unsigned int virq;
650 irq_hw_number_t bit, hwirq;
651
652 msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
653
654 do {
655 msi_status = readl_relaxed(msi_set->base +
656 PCIE_MSI_SET_STATUS_OFFSET);
657 msi_status &= msi_enable;
658 if (!msi_status)
659 break;
660
661 for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
662 hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
663 virq = irq_find_mapping(port->msi_bottom_domain, hwirq);
664 generic_handle_irq(virq);
665 }
666 } while (true);
667}
668
developerfd40db22021-04-29 10:08:25 +0800669static void mtk_pcie_irq_handler(struct irq_desc *desc)
670{
671 struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
672 struct irq_chip *irqchip = irq_desc_get_chip(desc);
673 unsigned long status;
674 unsigned int virq;
675 irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;
676
677 chained_irq_enter(irqchip, desc);
678
developer44e30b02021-07-02 11:12:14 +0800679 status = readl_relaxed(port->base + PCIE_INT_STATUS_REG);
680 for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
681 PCIE_INTX_SHIFT) {
682 virq = irq_find_mapping(port->intx_domain,
683 irq_bit - PCIE_INTX_SHIFT);
684 generic_handle_irq(virq);
developerfd40db22021-04-29 10:08:25 +0800685 }
686
developer44e30b02021-07-02 11:12:14 +0800687 irq_bit = PCIE_MSI_SHIFT;
688 for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
689 PCIE_MSI_SHIFT) {
690 mtk_pcie_msi_handler(port, irq_bit - PCIE_MSI_SHIFT);
691
692 writel_relaxed(BIT(irq_bit), port->base + PCIE_INT_STATUS_REG);
developerfd40db22021-04-29 10:08:25 +0800693 }
694
695 chained_irq_exit(irqchip, desc);
696}
697
developer44e30b02021-07-02 11:12:14 +0800698static int mtk_pcie_setup_irq(struct mtk_pcie_port *port)
developerfd40db22021-04-29 10:08:25 +0800699{
700 struct device *dev = port->dev;
701 struct platform_device *pdev = to_platform_device(dev);
702 int err;
703
developer44e30b02021-07-02 11:12:14 +0800704 err = mtk_pcie_init_irq_domains(port);
705 if (err)
developerfd40db22021-04-29 10:08:25 +0800706 return err;
developerfd40db22021-04-29 10:08:25 +0800707
708 port->irq = platform_get_irq(pdev, 0);
709 if (port->irq < 0)
710 return port->irq;
711
712 irq_set_chained_handler_and_data(port->irq, mtk_pcie_irq_handler, port);
713
714 return 0;
715}
716
developer44e30b02021-07-02 11:12:14 +0800717static int mtk_pcie_parse_port(struct mtk_pcie_port *port)
developerfd40db22021-04-29 10:08:25 +0800718{
developer44e30b02021-07-02 11:12:14 +0800719 struct device *dev = port->dev;
720 struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
721 struct platform_device *pdev = to_platform_device(dev);
722 struct list_head *windows = &host->windows;
723 struct resource *regs, *bus;
developerfd40db22021-04-29 10:08:25 +0800724 int ret;
725
developer44e30b02021-07-02 11:12:14 +0800726 ret = pci_parse_request_of_pci_ranges(dev, windows, &bus);
727 if (ret) {
728 dev_err(dev, "failed to parse pci ranges\n");
729 return ret;
developerfd40db22021-04-29 10:08:25 +0800730 }
731
developer44e30b02021-07-02 11:12:14 +0800732 regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
733 port->base = devm_ioremap_resource(dev, regs);
734 if (IS_ERR(port->base)) {
735 dev_err(dev, "failed to map register base\n");
736 return PTR_ERR(port->base);
737 }
738
739 port->reg_base = regs->start;
740
741 port->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
742 if (IS_ERR(port->phy_reset)) {
743 ret = PTR_ERR(port->phy_reset);
744 if (ret != -EPROBE_DEFER)
745 dev_err(dev, "failed to get PHY reset\n");
746
747 return ret;
748 }
749
750 port->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
751 if (IS_ERR(port->mac_reset)) {
752 ret = PTR_ERR(port->mac_reset);
753 if (ret != -EPROBE_DEFER)
754 dev_err(dev, "failed to get MAC reset\n");
755
developerfd40db22021-04-29 10:08:25 +0800756 return ret;
757 }
758
developer44e30b02021-07-02 11:12:14 +0800759 port->phy = devm_phy_optional_get(dev, "pcie-phy");
760 if (IS_ERR(port->phy)) {
761 ret = PTR_ERR(port->phy);
762 if (ret != -EPROBE_DEFER)
763 dev_err(dev, "failed to get PHY\n");
764
765 return ret;
766 }
767
768 port->num_clks = devm_clk_bulk_get_all(dev, &port->clks);
769 if (port->num_clks < 0) {
770 dev_err(dev, "failed to get clocks\n");
771 return port->num_clks;
772 }
773
developerfd40db22021-04-29 10:08:25 +0800774 return 0;
775}
776
777static int mtk_pcie_power_up(struct mtk_pcie_port *port)
778{
779 struct device *dev = port->dev;
780 int err;
781
developerfd40db22021-04-29 10:08:25 +0800782 /* PHY power on and enable pipe clock */
developerfd40db22021-04-29 10:08:25 +0800783 reset_control_deassert(port->phy_reset);
784
developerfd40db22021-04-29 10:08:25 +0800785 err = phy_init(port->phy);
786 if (err) {
developer44e30b02021-07-02 11:12:14 +0800787 dev_err(dev, "failed to initialize PHY\n");
developerfd40db22021-04-29 10:08:25 +0800788 goto err_phy_init;
789 }
790
developer44e30b02021-07-02 11:12:14 +0800791 err = phy_power_on(port->phy);
792 if (err) {
793 dev_err(dev, "failed to power on PHY\n");
794 goto err_phy_on;
developerfd40db22021-04-29 10:08:25 +0800795 }
796
developer44e30b02021-07-02 11:12:14 +0800797 /* MAC power on and enable transaction layer clocks */
developerfd40db22021-04-29 10:08:25 +0800798 reset_control_deassert(port->mac_reset);
799
developerfd40db22021-04-29 10:08:25 +0800800 pm_runtime_enable(dev);
801 pm_runtime_get_sync(dev);
802
developer44e30b02021-07-02 11:12:14 +0800803 err = clk_bulk_prepare_enable(port->num_clks, port->clks);
developerfd40db22021-04-29 10:08:25 +0800804 if (err) {
developer44e30b02021-07-02 11:12:14 +0800805 dev_err(dev, "failed to enable clocks\n");
developerfd40db22021-04-29 10:08:25 +0800806 goto err_clk_init;
807 }
808
809 return 0;
810
811err_clk_init:
812 pm_runtime_put_sync(dev);
813 pm_runtime_disable(dev);
814 reset_control_assert(port->mac_reset);
developerfd40db22021-04-29 10:08:25 +0800815 phy_power_off(port->phy);
816err_phy_on:
developer44e30b02021-07-02 11:12:14 +0800817 phy_exit(port->phy);
818err_phy_init:
developerfd40db22021-04-29 10:08:25 +0800819 reset_control_assert(port->phy_reset);
820
821 return err;
822}
823
824static void mtk_pcie_power_down(struct mtk_pcie_port *port)
825{
826 clk_bulk_disable_unprepare(port->num_clks, port->clks);
827
828 pm_runtime_put_sync(port->dev);
829 pm_runtime_disable(port->dev);
830 reset_control_assert(port->mac_reset);
831
832 phy_power_off(port->phy);
833 phy_exit(port->phy);
834 reset_control_assert(port->phy_reset);
835}
836
/*
 * Full port bring-up: parse resources, power up the hardware, start the
 * link and install interrupt handling.  The hardware is powered back down
 * if link training or IRQ setup fails.
 */
static int mtk_pcie_setup(struct mtk_pcie_port *port)
{
	int ret;

	ret = mtk_pcie_parse_port(port);
	if (ret)
		return ret;

	/* Don't touch the hardware registers before power up */
	ret = mtk_pcie_power_up(port);
	if (ret)
		return ret;

	/* Try link up */
	ret = mtk_pcie_startup_port(port);
	if (ret)
		goto err_power_down;

	ret = mtk_pcie_setup_irq(port);
	if (ret)
		goto err_power_down;

	return 0;

err_power_down:
	mtk_pcie_power_down(port);

	return ret;
}
866
developerfd40db22021-04-29 10:08:25 +0800867static int mtk_pcie_probe(struct platform_device *pdev)
868{
869 struct device *dev = &pdev->dev;
870 struct mtk_pcie_port *port;
871 struct pci_host_bridge *host;
872 int err;
873
874 host = devm_pci_alloc_host_bridge(dev, sizeof(*port));
875 if (!host)
876 return -ENOMEM;
877
878 port = pci_host_bridge_priv(host);
879
880 port->dev = dev;
881 platform_set_drvdata(pdev, port);
882
883 err = mtk_pcie_setup(port);
884 if (err)
developer44e30b02021-07-02 11:12:14 +0800885 return err;
developerfd40db22021-04-29 10:08:25 +0800886
developerfd40db22021-04-29 10:08:25 +0800887 host->dev.parent = port->dev;
developer44e30b02021-07-02 11:12:14 +0800888 host->ops = &mtk_pcie_ops;
developerfd40db22021-04-29 10:08:25 +0800889 host->map_irq = of_irq_parse_and_map_pci;
890 host->swizzle_irq = pci_common_swizzle;
developerfd40db22021-04-29 10:08:25 +0800891 host->sysdata = port;
892
893 err = pci_host_probe(host);
894 if (err) {
895 mtk_pcie_irq_teardown(port);
896 mtk_pcie_power_down(port);
developer44e30b02021-07-02 11:12:14 +0800897 return err;
developerfd40db22021-04-29 10:08:25 +0800898 }
899
900 return 0;
developerfd40db22021-04-29 10:08:25 +0800901}
902
903static int mtk_pcie_remove(struct platform_device *pdev)
904{
905 struct mtk_pcie_port *port = platform_get_drvdata(pdev);
906 struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
907
908 pci_lock_rescan_remove();
909 pci_stop_root_bus(host->bus);
910 pci_remove_root_bus(host->bus);
911 pci_unlock_rescan_remove();
912
913 mtk_pcie_irq_teardown(port);
914 mtk_pcie_power_down(port);
915
916 return 0;
917}
918
developer44e30b02021-07-02 11:12:14 +0800919static void __maybe_unused mtk_pcie_irq_save(struct mtk_pcie_port *port)
920{
921 int i;
922
923 raw_spin_lock(&port->irq_lock);
924
925 port->saved_irq_state = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
926
927 for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
928 struct mtk_msi_set *msi_set = &port->msi_sets[i];
929
930 msi_set->saved_irq_state = readl_relaxed(msi_set->base +
931 PCIE_MSI_SET_ENABLE_OFFSET);
932 }
933
934 raw_spin_unlock(&port->irq_lock);
935}
936
937static void __maybe_unused mtk_pcie_irq_restore(struct mtk_pcie_port *port)
938{
939 int i;
940
941 raw_spin_lock(&port->irq_lock);
942
943 writel_relaxed(port->saved_irq_state, port->base + PCIE_INT_ENABLE_REG);
944
945 for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
946 struct mtk_msi_set *msi_set = &port->msi_sets[i];
947
948 writel_relaxed(msi_set->saved_irq_state,
949 msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
950 }
951
952 raw_spin_unlock(&port->irq_lock);
953}
954
developerfd40db22021-04-29 10:08:25 +0800955static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_pcie_port *port)
956{
957 u32 val;
958
developer44e30b02021-07-02 11:12:14 +0800959 val = readl_relaxed(port->base + PCIE_ICMD_PM_REG);
developerfd40db22021-04-29 10:08:25 +0800960 val |= PCIE_TURN_OFF_LINK;
developer44e30b02021-07-02 11:12:14 +0800961 writel_relaxed(val, port->base + PCIE_ICMD_PM_REG);
developerfd40db22021-04-29 10:08:25 +0800962
963 /* Check the link is L2 */
964 return readl_poll_timeout(port->base + PCIE_LTSSM_STATUS_REG, val,
965 (PCIE_LTSSM_STATE(val) ==
966 PCIE_LTSSM_STATE_L2_IDLE), 20,
967 50 * USEC_PER_MSEC);
968}
969
970static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
971{
972 struct mtk_pcie_port *port = dev_get_drvdata(dev);
973 int err;
974 u32 val;
975
976 /* Trigger link to L2 state */
977 err = mtk_pcie_turn_off_link(port);
978 if (err) {
developer44e30b02021-07-02 11:12:14 +0800979 dev_err(port->dev, "cannot enter L2 state\n");
developerfd40db22021-04-29 10:08:25 +0800980 return err;
981 }
982
983 /* Pull down the PERST# pin */
developer44e30b02021-07-02 11:12:14 +0800984 val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
developerfd40db22021-04-29 10:08:25 +0800985 val |= PCIE_PE_RSTB;
developer44e30b02021-07-02 11:12:14 +0800986 writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
developerfd40db22021-04-29 10:08:25 +0800987
developer44e30b02021-07-02 11:12:14 +0800988 dev_dbg(port->dev, "entered L2 states successfully");
developerfd40db22021-04-29 10:08:25 +0800989
developer44e30b02021-07-02 11:12:14 +0800990 mtk_pcie_irq_save(port);
991 mtk_pcie_power_down(port);
developerfd40db22021-04-29 10:08:25 +0800992
993 return 0;
994}
995
996static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
997{
998 struct mtk_pcie_port *port = dev_get_drvdata(dev);
999 int err;
1000
developer44e30b02021-07-02 11:12:14 +08001001 err = mtk_pcie_power_up(port);
1002 if (err)
developerfd40db22021-04-29 10:08:25 +08001003 return err;
developerfd40db22021-04-29 10:08:25 +08001004
1005 err = mtk_pcie_startup_port(port);
1006 if (err) {
developer44e30b02021-07-02 11:12:14 +08001007 mtk_pcie_power_down(port);
developerfd40db22021-04-29 10:08:25 +08001008 return err;
1009 }
1010
developer44e30b02021-07-02 11:12:14 +08001011 mtk_pcie_irq_restore(port);
developerfd40db22021-04-29 10:08:25 +08001012
1013 return 0;
1014}
1015
/* Suspend/resume only in the noirq phase, after device IRQs are disabled */
static const struct dev_pm_ops mtk_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				      mtk_pcie_resume_noirq)
};
1020
1021static const struct of_device_id mtk_pcie_of_match[] = {
1022 { .compatible = "mediatek,mt8192-pcie" },
developer44e30b02021-07-02 11:12:14 +08001023 { .compatible = "mediatek,mt7986-pcie" },
developerfd40db22021-04-29 10:08:25 +08001024 {},
1025};
1026
1027static struct platform_driver mtk_pcie_driver = {
1028 .probe = mtk_pcie_probe,
1029 .remove = mtk_pcie_remove,
1030 .driver = {
1031 .name = "mtk-pcie",
1032 .of_match_table = mtk_pcie_of_match,
1033 .pm = &mtk_pcie_pm_ops,
1034 },
1035};
1036
1037module_platform_driver(mtk_pcie_driver);
1038MODULE_LICENSE("GPL v2");