blob: 0e01fde167d51fda9350b5ddcd151a2040a7a80f [file] [log] [blame]
developerfd40db22021-04-29 10:08:25 +08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * MediaTek PCIe host controller driver.
4 *
5 * Copyright (c) 2020 MediaTek Inc.
6 * Author: Jianjun Wang <jianjun.wang@mediatek.com>
7 */
8
9#include <linux/clk.h>
10#include <linux/delay.h>
11#include <linux/iopoll.h>
12#include <linux/irq.h>
13#include <linux/irqchip/chained_irq.h>
14#include <linux/irqdomain.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/msi.h>
developerfd40db22021-04-29 10:08:25 +080018#include <linux/of_pci.h>
developerfd40db22021-04-29 10:08:25 +080019#include <linux/pci.h>
20#include <linux/phy/phy.h>
21#include <linux/platform_device.h>
22#include <linux/pm_domain.h>
23#include <linux/pm_runtime.h>
24#include <linux/reset.h>
25
26#include "../pci.h"
27
/* Register map: MAC/host controller registers (offsets from port->base) */
#define PCIE_SETTING_REG		0x80
#define PCIE_PCI_IDS_1			0x9c
/* Class code field lives at bits [31:8] of PCIE_PCI_IDS_1 */
#define PCI_CLASS(class)		((class) << 8)
#define PCIE_RC_MODE			BIT(0)

#define PCIE_CFGNUM_REG			0x140
#define PCIE_CFG_DEVFN(devfn)		((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus)		(((bus) << 8) & GENMASK(15, 8))
#define PCIE_CFG_BYTE_EN(bytes)		(((bytes) << 16) & GENMASK(19, 16))
#define PCIE_CFG_FORCE_BYTE_EN		BIT(20)
#define PCIE_CFG_OFFSET_ADDR		0x1000
#define PCIE_CFG_HEADER(bus, devfn) \
	(PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))

#define PCIE_RST_CTRL_REG		0x148
#define PCIE_MAC_RSTB			BIT(0)
#define PCIE_PHY_RSTB			BIT(1)
#define PCIE_BRG_RSTB			BIT(2)
#define PCIE_PE_RSTB			BIT(3)

#define PCIE_LTSSM_STATUS_REG		0x150
#define PCIE_LTSSM_STATE_MASK		GENMASK(28, 24)
#define PCIE_LTSSM_STATE(val)		(((val) & PCIE_LTSSM_STATE_MASK) >> 24)
#define PCIE_LTSSM_STATE_L2_IDLE	0x14

#define PCIE_LINK_STATUS_REG		0x154
#define PCIE_PORT_LINKUP		BIT(8)

#define PCIE_MSI_SET_NUM		8
#define PCIE_MSI_IRQS_PER_SET		32
#define PCIE_MSI_IRQS_NUM \
	(PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)

#define PCIE_INT_ENABLE_REG		0x180
#define PCIE_MSI_ENABLE			GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
#define PCIE_MSI_SHIFT			8
#define PCIE_INTX_SHIFT			24
#define PCIE_INTX_ENABLE \
	GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)

#define PCIE_INT_STATUS_REG		0x184
#define PCIE_MSI_SET_ENABLE_REG		0x190
#define PCIE_MSI_SET_ENABLE		GENMASK(PCIE_MSI_SET_NUM - 1, 0)

#define PCIE_MSI_SET_BASE_REG		0xc00
#define PCIE_MSI_SET_OFFSET		0x10
#define PCIE_MSI_SET_STATUS_OFFSET	0x04
#define PCIE_MSI_SET_ENABLE_OFFSET	0x08
#define PCIE_MSI_SET_GRP1_ENABLE_OFFSET	0x0c

#define PCIE_MSI_SET_ADDR_HI_BASE	0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET	0x04

#define PCIE_ICMD_PM_REG		0x198
#define PCIE_TURN_OFF_LINK		BIT(4)

/* Address translation (ATR) table registers, one set per window */
#define PCIE_TRANS_TABLE_BASE_REG	0x800
#define PCIE_ATR_SRC_ADDR_MSB_OFFSET	0x4
#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET	0x8
#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET	0xc
#define PCIE_ATR_TRSL_PARAM_OFFSET	0x10
#define PCIE_ATR_TLB_SET_OFFSET		0x20

#define PCIE_MAX_TRANS_TABLES		8
#define PCIE_ATR_EN			BIT(0)
/* @size is log2 of the window size; encoded into bits [6:1] */
#define PCIE_ATR_SIZE(size) \
	(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
#define PCIE_ATR_ID(id)			((id) & GENMASK(3, 0))
#define PCIE_ATR_TYPE_MEM		PCIE_ATR_ID(0)
#define PCIE_ATR_TYPE_IO		PCIE_ATR_ID(1)
#define PCIE_ATR_TLP_TYPE(type)		(((type) << 16) & GENMASK(18, 16))
#define PCIE_ATR_TLP_TYPE_MEM		PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO		PCIE_ATR_TLP_TYPE(2)
101
/**
 * struct mtk_msi_set - MSI information for each set
 * @base: IO mapped register base of this set's registers
 *        (port->base + PCIE_MSI_SET_BASE_REG + index * PCIE_MSI_SET_OFFSET)
 * @msg_addr: MSI message address (physical address endpoints write to in
 *            order to trigger an MSI in this set)
 * @saved_irq_state: IRQ enable state saved at suspend time
 */
struct mtk_msi_set {
	void __iomem *base;
	phys_addr_t msg_addr;
	u32 saved_irq_state;
};
113
/**
 * struct mtk_pcie_port - PCIe port information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @reg_base: physical register base
 * @mac_reset: MAC reset control
 * @phy_reset: PHY reset control
 * @phy: PHY controller block
 * @clks: PCIe clocks
 * @num_clks: PCIe clocks count for this port
 * @irq: PCIe controller interrupt number
 * @direct_msi_enable: non-zero when each MSI bit is routed to its own
 *                     dedicated interrupt line (set from the "direct_msi"
 *                     devicetree property)
 * @direct_msi: Linux interrupt numbers of the per-bit direct MSI lines
 * @saved_irq_state: IRQ enable state saved at suspend time
 * @irq_lock: lock protecting IRQ register access
 * @intx_domain: legacy INTx IRQ domain
 * @msi_domain: MSI IRQ domain
 * @msi_bottom_domain: MSI IRQ bottom domain
 * @msi_sets: MSI sets information
 * @lock: lock protecting IRQ bit map
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_pcie_port {
	struct device *dev;
	void __iomem *base;
	phys_addr_t reg_base;
	struct reset_control *mac_reset;
	struct reset_control *phy_reset;
	struct phy *phy;
	struct clk_bulk_data *clks;
	int num_clks;

	int irq;
	int direct_msi_enable;
	int direct_msi[PCIE_MSI_IRQS_PER_SET];
	u32 saved_irq_state;
	raw_spinlock_t irq_lock;
	struct irq_domain *intx_domain;
	struct irq_domain *msi_domain;
	struct irq_domain *msi_bottom_domain;
	struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
};
156
157/**
developer44e30b02021-07-02 11:12:14 +0800158 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
developerfd40db22021-04-29 10:08:25 +0800159 * @bus: PCI bus to query
160 * @devfn: device/function number
161 * @where: offset in config space
162 * @size: data size in TLP header
163 *
164 * Set byte enable field and device information in configuration TLP header.
165 */
166static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
167 int where, int size)
168{
169 struct mtk_pcie_port *port = bus->sysdata;
170 int bytes;
171 u32 val;
172
173 bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);
174
175 val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
176 PCIE_CFG_HEADER(bus->number, devfn);
177
developer44e30b02021-07-02 11:12:14 +0800178 writel_relaxed(val, port->base + PCIE_CFGNUM_REG);
developerfd40db22021-04-29 10:08:25 +0800179}
180
181static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
182 int where)
183{
184 struct mtk_pcie_port *port = bus->sysdata;
185
186 return port->base + PCIE_CFG_OFFSET_ADDR + where;
187}
188
189static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
190 int where, int size, u32 *val)
191{
192 mtk_pcie_config_tlp_header(bus, devfn, where, size);
193
194 return pci_generic_config_read32(bus, devfn, where, size, val);
195}
196
197static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
198 int where, int size, u32 val)
199{
200 mtk_pcie_config_tlp_header(bus, devfn, where, size);
201
202 if (size <= 2)
203 val <<= (where & 0x3) * 8;
204
205 return pci_generic_config_write32(bus, devfn, where, 4, val);
206}
207
/* Config space accessors: per-access TLP header setup + generic 32-bit ops */
static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};
213
/*
 * Program address translation table @num so that CPU accesses in
 * [@cpu_addr, @cpu_addr + @size) are forwarded to @pci_addr, with the TLP
 * type derived from the resource @type (IO vs MEM).
 *
 * NOTE(review): the window size is encoded as fls(size) - 1, i.e. rounded
 * down to a power of two, and fls() operates on a 32-bit value — this
 * assumes callers pass power-of-two sizes below 4GB; confirm against the
 * bridge window setup.
 *
 * Returns 0 on success, -ENODEV when all translation tables are in use.
 */
static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port,
				    resource_size_t cpu_addr,
				    resource_size_t pci_addr,
				    resource_size_t size,
				    unsigned long type, int num)
{
	void __iomem *table;
	u32 val;

	if (num >= PCIE_MAX_TRANS_TABLES) {
		dev_err(port->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
			(unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
		return -ENODEV;
	}

	table = port->base + PCIE_TRANS_TABLE_BASE_REG +
		num * PCIE_ATR_TLB_SET_OFFSET;

	/* Source (CPU) address and encoded window size */
	writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
		       table);
	writel_relaxed(upper_32_bits(cpu_addr),
		       table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
	/* Translated (PCI bus) address */
	writel_relaxed(lower_32_bits(pci_addr),
		       table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
	writel_relaxed(upper_32_bits(pci_addr),
		       table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);

	if (type == IORESOURCE_IO)
		val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
	else
		val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;

	/* Writing the param register last commits the window */
	writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);

	return 0;
}
250
/* Program the per-set MSI capture addresses and enable all MSI sets */
static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
{
	int i;
	u32 val;

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &port->msi_sets[i];

		/*
		 * Register window of the set, and the physical address an
		 * endpoint writes to in order to raise an MSI in this set.
		 */
		msi_set->base = port->base + PCIE_MSI_SET_BASE_REG +
				i * PCIE_MSI_SET_OFFSET;
		msi_set->msg_addr = port->reg_base + PCIE_MSI_SET_BASE_REG +
				    i * PCIE_MSI_SET_OFFSET;

		/* Configure the MSI capture address */
		writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
		writel_relaxed(upper_32_bits(msi_set->msg_addr),
			       port->base + PCIE_MSI_SET_ADDR_HI_BASE +
			       i * PCIE_MSI_SET_ADDR_HI_OFFSET);
	}

	val = readl_relaxed(port->base + PCIE_MSI_SET_ENABLE_REG);
	val |= PCIE_MSI_SET_ENABLE;
	writel_relaxed(val, port->base + PCIE_MSI_SET_ENABLE_REG);

	/*
	 * In direct MSI mode each MSI bit has its own interrupt line (see
	 * mtk_pcie_setup_irq()), so the aggregated MSI interrupt in
	 * PCIE_INT_ENABLE_REG is left masked.
	 */
	if (!port->direct_msi_enable) {
		val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
		val |= PCIE_MSI_ENABLE;
		writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
	}
}
281
/**
 * mtk_pcie_startup_port() - Bring up the link and configure the root complex
 * @port: PCIe port information
 *
 * Puts the MAC into RC mode, sets the bridge class code, releases the reset
 * signals (respecting the PERST# timing), waits for link-up, enables MSI and
 * programs one address translation window per bridge resource.
 *
 * Return: 0 on success, negative errno on link-up timeout or translation
 * table exhaustion.
 */
static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
	struct resource_entry *entry;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
	unsigned int table_index = 0;
	int err;
	u32 val;

	/* Set as RC mode */
	val = readl_relaxed(port->base + PCIE_SETTING_REG);
	val |= PCIE_RC_MODE;
	writel_relaxed(val, port->base + PCIE_SETTING_REG);

	/*
	 * Set class code: the pre-shifted argument places
	 * PCI_CLASS_BRIDGE_PCI in bits [31:16], leaving prog-if zero.
	 */
	val = readl_relaxed(port->base + PCIE_PCI_IDS_1);
	val &= ~GENMASK(31, 8);
	val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8);
	writel_relaxed(val, port->base + PCIE_PCI_IDS_1);

	/* Mask all INTx interrupts */
	val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
	val &= ~PCIE_INTX_ENABLE;
	writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);

	/* Assert all reset signals */
	val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
	val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
	writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);

	/*
	 * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
	 * and 2.2.1 (Initial Power-Up (G3 to S0)).
	 * The deassertion of PERST# should be delayed 100ms (TPVPERL)
	 * for the power and clock to become stable.
	 */
	msleep(100);

	/* De-assert reset signals */
	val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
	writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);

	/* Check if the link is up or not */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_REG, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
	if (err) {
		val = readl_relaxed(port->base + PCIE_LTSSM_STATUS_REG);
		dev_err(port->dev, "PCIe link down, ltssm reg val: %#x\n", val);
		return err;
	}

	mtk_pcie_enable_msi(port);

	/* Set PCIe translation windows */
	resource_list_for_each_entry(entry, &host->windows) {
		struct resource *res = entry->res;
		unsigned long type = resource_type(res);
		resource_size_t cpu_addr;
		resource_size_t pci_addr;
		resource_size_t size;
		const char *range_type;

		if (type == IORESOURCE_IO) {
			cpu_addr = pci_pio_to_address(res->start);
			range_type = "IO";
		} else if (type == IORESOURCE_MEM) {
			cpu_addr = res->start;
			range_type = "MEM";
		} else {
			continue;
		}

		pci_addr = res->start - entry->offset;
		size = resource_size(res);
		err = mtk_pcie_set_trans_table(port, cpu_addr, pci_addr, size,
					       type, table_index);
		if (err)
			return err;

		dev_dbg(port->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
			range_type, table_index, (unsigned long long)cpu_addr,
			(unsigned long long)pci_addr, (unsigned long long)size);

		table_index++;
	}

	return 0;
}
370
developer63dcf012021-09-02 10:14:03 +0800371static int mtk_pcie_set_msi_affinity(struct irq_data *data,
372 const struct cpumask *mask, bool force)
373{
374 struct mtk_pcie_port *port = data->domain->host_data;
375 struct irq_data *port_data;
376 struct irq_chip *port_chip;
377 int msi_bit, irq, ret;
378
379 msi_bit = data->hwirq % PCIE_MSI_IRQS_PER_SET;
380 irq = port->direct_msi[msi_bit];
381
382 port_data = irq_get_irq_data(irq);
383 port_chip = irq_data_get_irq_chip(port_data);
384 if (!port_chip || !port_chip->irq_set_affinity)
385 return -EINVAL;
386
387 ret = port_chip->irq_set_affinity(port_data, mask, force);
388
389 irq_data_update_effective_affinity(data, mask);
390
391 return ret;
392}
393
/* Affinity cannot be steered for aggregated INTx/MSI lines */
static int mtk_pcie_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
developerfd40db22021-04-29 10:08:25 +0800399
/* Mask an MSI both at the PCI device and in the parent (bottom) domain */
static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
	pci_msi_mask_irq(data);
	irq_chip_mask_parent(data);
}
developerfd40db22021-04-29 10:08:25 +0800405
/* Unmask an MSI both at the PCI device and in the parent (bottom) domain */
static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
	irq_chip_unmask_parent(data);
}
developerfd40db22021-04-29 10:08:25 +0800411
/* Top-level MSI chip used by the PCI MSI domain */
static struct irq_chip mtk_msi_irq_chip = {
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = mtk_pcie_msi_irq_mask,
	.irq_unmask = mtk_pcie_msi_irq_unmask,
	.name = "MSI",
};
developerfd40db22021-04-29 10:08:25 +0800418
/* PCI MSI domain info: supports MSI-X and multiple MSIs per device */
static struct msi_domain_info mtk_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip = &mtk_msi_irq_chip,
};
developerfd40db22021-04-29 10:08:25 +0800424
425static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
426{
developer44e30b02021-07-02 11:12:14 +0800427 struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
428 struct mtk_pcie_port *port = data->domain->host_data;
developerfd40db22021-04-29 10:08:25 +0800429 unsigned long hwirq;
430
developerfd40db22021-04-29 10:08:25 +0800431 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
432
developer44e30b02021-07-02 11:12:14 +0800433 msg->address_hi = upper_32_bits(msi_set->msg_addr);
434 msg->address_lo = lower_32_bits(msi_set->msg_addr);
developerfd40db22021-04-29 10:08:25 +0800435 msg->data = hwirq;
436 dev_dbg(port->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
437 hwirq, msg->address_hi, msg->address_lo, msg->data);
438}
439
developer44e30b02021-07-02 11:12:14 +0800440static void mtk_msi_bottom_irq_ack(struct irq_data *data)
developerfd40db22021-04-29 10:08:25 +0800441{
developer44e30b02021-07-02 11:12:14 +0800442 struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
developerfd40db22021-04-29 10:08:25 +0800443 unsigned long hwirq;
444
developerfd40db22021-04-29 10:08:25 +0800445 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
446
developer44e30b02021-07-02 11:12:14 +0800447 writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
developerfd40db22021-04-29 10:08:25 +0800448}
449
developer44e30b02021-07-02 11:12:14 +0800450static void mtk_msi_bottom_irq_mask(struct irq_data *data)
developerfd40db22021-04-29 10:08:25 +0800451{
developer44e30b02021-07-02 11:12:14 +0800452 struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
453 struct mtk_pcie_port *port = data->domain->host_data;
454 unsigned long hwirq, flags;
developerfd40db22021-04-29 10:08:25 +0800455 u32 val;
456
developerfd40db22021-04-29 10:08:25 +0800457 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
458
developer44e30b02021-07-02 11:12:14 +0800459 raw_spin_lock_irqsave(&port->irq_lock, flags);
developer63dcf012021-09-02 10:14:03 +0800460 if (port->direct_msi_enable) {
461 val = readl_relaxed(msi_set->base +
462 PCIE_MSI_SET_GRP1_ENABLE_OFFSET);
463 val &= ~BIT(hwirq);
464 writel_relaxed(val, msi_set->base +
465 PCIE_MSI_SET_GRP1_ENABLE_OFFSET);
466 } else {
467 val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
468 val &= ~BIT(hwirq);
469 writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
470 }
developer44e30b02021-07-02 11:12:14 +0800471 raw_spin_unlock_irqrestore(&port->irq_lock, flags);
developerfd40db22021-04-29 10:08:25 +0800472}
473
developer44e30b02021-07-02 11:12:14 +0800474static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
developerfd40db22021-04-29 10:08:25 +0800475{
developer44e30b02021-07-02 11:12:14 +0800476 struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
477 struct mtk_pcie_port *port = data->domain->host_data;
478 unsigned long hwirq, flags;
developerfd40db22021-04-29 10:08:25 +0800479 u32 val;
480
developerfd40db22021-04-29 10:08:25 +0800481 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
482
developer44e30b02021-07-02 11:12:14 +0800483 raw_spin_lock_irqsave(&port->irq_lock, flags);
developer63dcf012021-09-02 10:14:03 +0800484 if (port->direct_msi_enable) {
485 val = readl_relaxed(msi_set->base +
486 PCIE_MSI_SET_GRP1_ENABLE_OFFSET);
487 val |= BIT(hwirq);
488 writel_relaxed(val, msi_set->base +
489 PCIE_MSI_SET_GRP1_ENABLE_OFFSET);
490 } else {
491 val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
492 val |= BIT(hwirq);
493 writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
494 }
developer44e30b02021-07-02 11:12:14 +0800495 raw_spin_unlock_irqrestore(&port->irq_lock, flags);
developerfd40db22021-04-29 10:08:25 +0800496}
497
/* Bottom-level MSI chip: talks directly to the controller's MSI sets */
static struct irq_chip mtk_msi_bottom_irq_chip = {
	.irq_ack = mtk_msi_bottom_irq_ack,
	.irq_mask = mtk_msi_bottom_irq_mask,
	.irq_unmask = mtk_msi_bottom_irq_unmask,
	.irq_compose_msi_msg = mtk_compose_msi_msg,
	/* Overridden with mtk_pcie_set_msi_affinity in direct MSI mode */
	.irq_set_affinity = mtk_pcie_set_affinity,
	.name = "MSI",
};
506
developer44e30b02021-07-02 11:12:14 +0800507static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
508 unsigned int virq, unsigned int nr_irqs,
509 void *arg)
developerfd40db22021-04-29 10:08:25 +0800510{
developer44e30b02021-07-02 11:12:14 +0800511 struct mtk_pcie_port *port = domain->host_data;
512 struct mtk_msi_set *msi_set;
513 int i, hwirq, set_idx;
developerfd40db22021-04-29 10:08:25 +0800514
515 mutex_lock(&port->lock);
516
517 hwirq = bitmap_find_free_region(port->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
developer44e30b02021-07-02 11:12:14 +0800518 order_base_2(nr_irqs));
developerfd40db22021-04-29 10:08:25 +0800519
520 mutex_unlock(&port->lock);
521
developer44e30b02021-07-02 11:12:14 +0800522 if (hwirq < 0)
523 return -ENOSPC;
developerfd40db22021-04-29 10:08:25 +0800524
developer44e30b02021-07-02 11:12:14 +0800525 set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
526 msi_set = &port->msi_sets[set_idx];
developerfd40db22021-04-29 10:08:25 +0800527
developer44e30b02021-07-02 11:12:14 +0800528 for (i = 0; i < nr_irqs; i++)
529 irq_domain_set_info(domain, virq + i, hwirq + i,
530 &mtk_msi_bottom_irq_chip, msi_set,
531 handle_edge_irq, NULL, NULL);
developerfd40db22021-04-29 10:08:25 +0800532
developer44e30b02021-07-02 11:12:14 +0800533 return 0;
developerfd40db22021-04-29 10:08:25 +0800534}
535
developer44e30b02021-07-02 11:12:14 +0800536static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
537 unsigned int virq, unsigned int nr_irqs)
developerfd40db22021-04-29 10:08:25 +0800538{
developer44e30b02021-07-02 11:12:14 +0800539 struct mtk_pcie_port *port = domain->host_data;
540 struct irq_data *data = irq_domain_get_irq_data(domain, virq);
developerfd40db22021-04-29 10:08:25 +0800541
developer44e30b02021-07-02 11:12:14 +0800542 mutex_lock(&port->lock);
developerfd40db22021-04-29 10:08:25 +0800543
developer44e30b02021-07-02 11:12:14 +0800544 bitmap_release_region(port->msi_irq_in_use, data->hwirq,
545 order_base_2(nr_irqs));
developerfd40db22021-04-29 10:08:25 +0800546
developer44e30b02021-07-02 11:12:14 +0800547 mutex_unlock(&port->lock);
developerfd40db22021-04-29 10:08:25 +0800548
developer44e30b02021-07-02 11:12:14 +0800549 irq_domain_free_irqs_common(domain, virq, nr_irqs);
developerfd40db22021-04-29 10:08:25 +0800550}
551
/* Bottom MSI domain: allocation backed by the per-port hwirq bitmap */
static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
	.alloc = mtk_msi_bottom_domain_alloc,
	.free = mtk_msi_bottom_domain_free,
};
556
557static void mtk_intx_mask(struct irq_data *data)
558{
559 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
developer44e30b02021-07-02 11:12:14 +0800560 unsigned long flags;
developerfd40db22021-04-29 10:08:25 +0800561 u32 val;
562
developer44e30b02021-07-02 11:12:14 +0800563 raw_spin_lock_irqsave(&port->irq_lock, flags);
564 val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
developerfd40db22021-04-29 10:08:25 +0800565 val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
developer44e30b02021-07-02 11:12:14 +0800566 writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
567 raw_spin_unlock_irqrestore(&port->irq_lock, flags);
developerfd40db22021-04-29 10:08:25 +0800568}
569
570static void mtk_intx_unmask(struct irq_data *data)
571{
572 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
developer44e30b02021-07-02 11:12:14 +0800573 unsigned long flags;
developerfd40db22021-04-29 10:08:25 +0800574 u32 val;
575
developer44e30b02021-07-02 11:12:14 +0800576 raw_spin_lock_irqsave(&port->irq_lock, flags);
577 val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
developerfd40db22021-04-29 10:08:25 +0800578 val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
developer44e30b02021-07-02 11:12:14 +0800579 writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
580 raw_spin_unlock_irqrestore(&port->irq_lock, flags);
developerfd40db22021-04-29 10:08:25 +0800581}
582
developer44e30b02021-07-02 11:12:14 +0800583/**
584 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
585 * @data: pointer to chip specific data
586 *
587 * As an emulated level IRQ, its interrupt status will remain
588 * until the corresponding de-assert message is received; hence that
589 * the status can only be cleared when the interrupt has been serviced.
590 */
developerfd40db22021-04-29 10:08:25 +0800591static void mtk_intx_eoi(struct irq_data *data)
592{
593 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
594 unsigned long hwirq;
595
developerfd40db22021-04-29 10:08:25 +0800596 hwirq = data->hwirq + PCIE_INTX_SHIFT;
developer44e30b02021-07-02 11:12:14 +0800597 writel_relaxed(BIT(hwirq), port->base + PCIE_INT_STATUS_REG);
developerfd40db22021-04-29 10:08:25 +0800598}
599
/* Legacy INTx chip: fasteoi flow, status cleared in the eoi callback */
static struct irq_chip mtk_intx_irq_chip = {
	.irq_mask = mtk_intx_mask,
	.irq_unmask = mtk_intx_unmask,
	.irq_eoi = mtk_intx_eoi,
	.irq_set_affinity = mtk_pcie_set_affinity,
	.name = "INTx",
};
607
608static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
609 irq_hw_number_t hwirq)
610{
developer44e30b02021-07-02 11:12:14 +0800611 irq_set_chip_data(irq, domain->host_data);
developerfd40db22021-04-29 10:08:25 +0800612 irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
613 handle_fasteoi_irq, "INTx");
developerfd40db22021-04-29 10:08:25 +0800614 return 0;
615}
616
/* INTx domain: simple linear mapping of the four legacy interrupts */
static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};
620
developer44e30b02021-07-02 11:12:14 +0800621static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port)
developerfd40db22021-04-29 10:08:25 +0800622{
623 struct device *dev = port->dev;
developer44e30b02021-07-02 11:12:14 +0800624 struct device_node *intc_node, *node = dev->of_node;
625 int ret;
626
627 raw_spin_lock_init(&port->irq_lock);
developerfd40db22021-04-29 10:08:25 +0800628
629 /* Setup INTx */
630 intc_node = of_get_child_by_name(node, "interrupt-controller");
631 if (!intc_node) {
developer44e30b02021-07-02 11:12:14 +0800632 dev_err(dev, "missing interrupt-controller node\n");
developerfd40db22021-04-29 10:08:25 +0800633 return -ENODEV;
634 }
635
636 port->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
637 &intx_domain_ops, port);
638 if (!port->intx_domain) {
developer44e30b02021-07-02 11:12:14 +0800639 dev_err(dev, "failed to create INTx IRQ domain\n");
developerfd40db22021-04-29 10:08:25 +0800640 return -ENODEV;
641 }
642
643 /* Setup MSI */
644 mutex_init(&port->lock);
645
developer44e30b02021-07-02 11:12:14 +0800646 port->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
647 &mtk_msi_bottom_domain_ops, port);
648 if (!port->msi_bottom_domain) {
649 dev_err(dev, "failed to create MSI bottom domain\n");
developerfd40db22021-04-29 10:08:25 +0800650 ret = -ENODEV;
developer44e30b02021-07-02 11:12:14 +0800651 goto err_msi_bottom_domain;
developerfd40db22021-04-29 10:08:25 +0800652 }
653
developer44e30b02021-07-02 11:12:14 +0800654 port->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
655 &mtk_msi_domain_info,
656 port->msi_bottom_domain);
657 if (!port->msi_domain) {
658 dev_err(dev, "failed to create MSI domain\n");
developerfd40db22021-04-29 10:08:25 +0800659 ret = -ENODEV;
developer44e30b02021-07-02 11:12:14 +0800660 goto err_msi_domain;
developerfd40db22021-04-29 10:08:25 +0800661 }
662
developer63dcf012021-09-02 10:14:03 +0800663 if (of_find_property(node, "direct_msi", NULL))
664 port->direct_msi_enable = true;
665 else
666 port->direct_msi_enable = false;
667
developerfd40db22021-04-29 10:08:25 +0800668 return 0;
669
developerfd40db22021-04-29 10:08:25 +0800670err_msi_domain:
developer44e30b02021-07-02 11:12:14 +0800671 irq_domain_remove(port->msi_bottom_domain);
672err_msi_bottom_domain:
developerfd40db22021-04-29 10:08:25 +0800673 irq_domain_remove(port->intx_domain);
674
675 return ret;
676}
677
678static void mtk_pcie_irq_teardown(struct mtk_pcie_port *port)
679{
developerfd40db22021-04-29 10:08:25 +0800680 irq_set_chained_handler_and_data(port->irq, NULL, NULL);
681
682 if (port->intx_domain)
683 irq_domain_remove(port->intx_domain);
684
685 if (port->msi_domain)
686 irq_domain_remove(port->msi_domain);
687
developer44e30b02021-07-02 11:12:14 +0800688 if (port->msi_bottom_domain)
689 irq_domain_remove(port->msi_bottom_domain);
developerfd40db22021-04-29 10:08:25 +0800690
691 irq_dispose_mapping(port->irq);
692}
693
developer44e30b02021-07-02 11:12:14 +0800694static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx)
695{
696 struct mtk_msi_set *msi_set = &port->msi_sets[set_idx];
697 unsigned long msi_enable, msi_status;
698 unsigned int virq;
699 irq_hw_number_t bit, hwirq;
700
701 msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
702
703 do {
704 msi_status = readl_relaxed(msi_set->base +
705 PCIE_MSI_SET_STATUS_OFFSET);
706 msi_status &= msi_enable;
707 if (!msi_status)
708 break;
709
710 for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
711 hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
712 virq = irq_find_mapping(port->msi_bottom_domain, hwirq);
713 generic_handle_irq(virq);
714 }
715 } while (true);
716}
717
/* Chained handler demultiplexing INTx and aggregated MSI set interrupts */
static void mtk_pcie_irq_handler(struct irq_desc *desc)
{
	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	unsigned int virq;
	irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;

	chained_irq_enter(irqchip, desc);

	/* INTx status occupies bits [PCIE_INTX_SHIFT + 3:PCIE_INTX_SHIFT] */
	status = readl_relaxed(port->base + PCIE_INT_STATUS_REG);
	for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
			      PCIE_INTX_SHIFT) {
		virq = irq_find_mapping(port->intx_domain,
					irq_bit - PCIE_INTX_SHIFT);
		generic_handle_irq(virq);
	}

	/* One aggregate status bit per MSI set, starting at PCIE_MSI_SHIFT */
	irq_bit = PCIE_MSI_SHIFT;
	for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
			      PCIE_MSI_SHIFT) {
		mtk_pcie_msi_handler(port, irq_bit - PCIE_MSI_SHIFT);

		/* Clear the set's aggregate bit only after it was drained */
		writel_relaxed(BIT(irq_bit), port->base + PCIE_INT_STATUS_REG);
	}

	chained_irq_exit(irqchip, desc);
}
746
/*
 * Chained handler for one dedicated ("direct") MSI interrupt line. Each of
 * the 32 per-set MSI bit positions has its own line; this handler services
 * that one bit position across all MSI sets.
 */
static void mtk_pcie_direct_msi_handler(struct irq_desc *desc)
{
	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long msi_enable, msi_status;
	unsigned int virq;
	irq_hw_number_t hwirq;
	int i, msi_bit = -EINVAL;

	/* Map the triggering interrupt line back to its MSI bit position */
	for (i = 0; i < PCIE_MSI_IRQS_PER_SET; i++) {
		if (port->direct_msi[i] == irq_desc_get_irq(desc)) {
			msi_bit = i;
			break;
		}
	}

	/* Not one of our lines; nothing to demultiplex */
	if (msi_bit == -EINVAL)
		return;

	chained_irq_enter(irqchip, desc);

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &port->msi_sets[i];

		/* Service the bit only where it is both pending and enabled */
		msi_status = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_STATUS_OFFSET);
		msi_enable = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_GRP1_ENABLE_OFFSET);
		msi_status &= msi_enable;
		msi_status &= BIT(msi_bit);
		if (!msi_status)
			continue;

		hwirq = msi_bit + i * PCIE_MSI_IRQS_PER_SET;
		virq = irq_find_mapping(port->msi_bottom_domain, hwirq);
		generic_handle_irq(virq);
	}

	chained_irq_exit(irqchip, desc);
}
787
developer44e30b02021-07-02 11:12:14 +0800788static int mtk_pcie_setup_irq(struct mtk_pcie_port *port)
developerfd40db22021-04-29 10:08:25 +0800789{
790 struct device *dev = port->dev;
791 struct platform_device *pdev = to_platform_device(dev);
developer63dcf012021-09-02 10:14:03 +0800792 int err, i;
developerfd40db22021-04-29 10:08:25 +0800793
developer44e30b02021-07-02 11:12:14 +0800794 err = mtk_pcie_init_irq_domains(port);
795 if (err)
developerfd40db22021-04-29 10:08:25 +0800796 return err;
developerfd40db22021-04-29 10:08:25 +0800797
798 port->irq = platform_get_irq(pdev, 0);
799 if (port->irq < 0)
800 return port->irq;
801
802 irq_set_chained_handler_and_data(port->irq, mtk_pcie_irq_handler, port);
803
developer63dcf012021-09-02 10:14:03 +0800804 if (port->direct_msi_enable) {
805 mtk_msi_bottom_irq_chip.irq_set_affinity =
806 mtk_pcie_set_msi_affinity;
807
808 for (i = 0; i < PCIE_MSI_IRQS_PER_SET; i++) {
809 port->direct_msi[i] = platform_get_irq(pdev, i + 1);
810 irq_set_chained_handler_and_data(port->direct_msi[i],
811 mtk_pcie_direct_msi_handler, port);
812 }
813 }
814
developerfd40db22021-04-29 10:08:25 +0800815 return 0;
816}
817
developer44e30b02021-07-02 11:12:14 +0800818static int mtk_pcie_parse_port(struct mtk_pcie_port *port)
developerfd40db22021-04-29 10:08:25 +0800819{
developer44e30b02021-07-02 11:12:14 +0800820 struct device *dev = port->dev;
821 struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
822 struct platform_device *pdev = to_platform_device(dev);
823 struct list_head *windows = &host->windows;
824 struct resource *regs, *bus;
developerfd40db22021-04-29 10:08:25 +0800825 int ret;
826
developer44e30b02021-07-02 11:12:14 +0800827 ret = pci_parse_request_of_pci_ranges(dev, windows, &bus);
828 if (ret) {
829 dev_err(dev, "failed to parse pci ranges\n");
830 return ret;
developerfd40db22021-04-29 10:08:25 +0800831 }
832
developer44e30b02021-07-02 11:12:14 +0800833 regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
834 port->base = devm_ioremap_resource(dev, regs);
835 if (IS_ERR(port->base)) {
836 dev_err(dev, "failed to map register base\n");
837 return PTR_ERR(port->base);
838 }
839
840 port->reg_base = regs->start;
841
842 port->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
843 if (IS_ERR(port->phy_reset)) {
844 ret = PTR_ERR(port->phy_reset);
845 if (ret != -EPROBE_DEFER)
846 dev_err(dev, "failed to get PHY reset\n");
847
848 return ret;
849 }
850
851 port->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
852 if (IS_ERR(port->mac_reset)) {
853 ret = PTR_ERR(port->mac_reset);
854 if (ret != -EPROBE_DEFER)
855 dev_err(dev, "failed to get MAC reset\n");
856
developerfd40db22021-04-29 10:08:25 +0800857 return ret;
858 }
859
developer44e30b02021-07-02 11:12:14 +0800860 port->phy = devm_phy_optional_get(dev, "pcie-phy");
861 if (IS_ERR(port->phy)) {
862 ret = PTR_ERR(port->phy);
863 if (ret != -EPROBE_DEFER)
864 dev_err(dev, "failed to get PHY\n");
865
866 return ret;
867 }
868
869 port->num_clks = devm_clk_bulk_get_all(dev, &port->clks);
870 if (port->num_clks < 0) {
871 dev_err(dev, "failed to get clocks\n");
872 return port->num_clks;
873 }
874
developerfd40db22021-04-29 10:08:25 +0800875 return 0;
876}
877
/*
 * mtk_pcie_power_up() - bring the PCIe port out of reset and enable clocks
 * @port: port to power up
 *
 * Sequence (order matters): deassert the PHY reset, init and power on the
 * PHY, deassert the MAC reset, enable runtime PM, then enable the bulk
 * clocks.  On failure each completed step is unwound in reverse order.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mtk_pcie_power_up(struct mtk_pcie_port *port)
{
	struct device *dev = port->dev;
	int err;

	/* PHY power on and enable pipe clock */
	reset_control_deassert(port->phy_reset);

	err = phy_init(port->phy);
	if (err) {
		dev_err(dev, "failed to initialize PHY\n");
		goto err_phy_init;
	}

	err = phy_power_on(port->phy);
	if (err) {
		dev_err(dev, "failed to power on PHY\n");
		goto err_phy_on;
	}

	/* MAC power on and enable transaction layer clocks */
	reset_control_deassert(port->mac_reset);

	pm_runtime_enable(dev);
	/* NOTE(review): pm_runtime_get_sync() result is ignored — confirm intentional */
	pm_runtime_get_sync(dev);

	err = clk_bulk_prepare_enable(port->num_clks, port->clks);
	if (err) {
		dev_err(dev, "failed to enable clocks\n");
		goto err_clk_init;
	}

	return 0;

err_clk_init:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	reset_control_assert(port->mac_reset);
	phy_power_off(port->phy);
err_phy_on:
	phy_exit(port->phy);
err_phy_init:
	reset_control_assert(port->phy_reset);

	return err;
}
924
/*
 * mtk_pcie_power_down() - exact reverse of mtk_pcie_power_up()
 * @port: port to power down
 *
 * Disables the bulk clocks, drops the runtime-PM reference, asserts the
 * MAC reset, then powers off/exits the PHY and asserts its reset.
 */
static void mtk_pcie_power_down(struct mtk_pcie_port *port)
{
	clk_bulk_disable_unprepare(port->num_clks, port->clks);

	pm_runtime_put_sync(port->dev);
	pm_runtime_disable(port->dev);
	reset_control_assert(port->mac_reset);

	phy_power_off(port->phy);
	phy_exit(port->phy);
	reset_control_assert(port->phy_reset);
}
937
/*
 * mtk_pcie_setup() - one-time port bring-up at probe time
 * @port: port to set up
 *
 * Parses DT resources, powers the port up, trains the link, then installs
 * the interrupt infrastructure.  Hardware registers are only touched after
 * power-up succeeds; any failure past that point powers the port back down.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mtk_pcie_setup(struct mtk_pcie_port *port)
{
	int err;

	err = mtk_pcie_parse_port(port);
	if (err)
		return err;

	/* Don't touch the hardware registers before power up */
	err = mtk_pcie_power_up(port);
	if (err)
		return err;

	/* Try link up */
	err = mtk_pcie_startup_port(port);
	if (err)
		goto err_setup;

	err = mtk_pcie_setup_irq(port);
	if (err)
		goto err_setup;

	return 0;

err_setup:
	mtk_pcie_power_down(port);

	return err;
}
967
/*
 * mtk_pcie_probe() - platform probe entry point
 * @pdev: platform device describing the PCIe controller
 *
 * Allocates the host bridge with the port state as bridge private data,
 * brings the port up via mtk_pcie_setup(), then registers the host bridge.
 * A pci_host_probe() failure tears down IRQs and powers the port off.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mtk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_pcie_port *port;
	struct pci_host_bridge *host;
	int err;

	host = devm_pci_alloc_host_bridge(dev, sizeof(*port));
	if (!host)
		return -ENOMEM;

	port = pci_host_bridge_priv(host);

	port->dev = dev;
	platform_set_drvdata(pdev, port);

	err = mtk_pcie_setup(port);
	if (err)
		return err;

	host->dev.parent = port->dev;
	host->ops = &mtk_pcie_ops;
	host->map_irq = of_irq_parse_and_map_pci;
	host->swizzle_irq = pci_common_swizzle;
	host->sysdata = port;

	err = pci_host_probe(host);
	if (err) {
		mtk_pcie_irq_teardown(port);
		mtk_pcie_power_down(port);
		return err;
	}

	return 0;
}
1003
/*
 * mtk_pcie_remove() - platform remove entry point
 * @pdev: platform device being removed
 *
 * Stops and removes the root bus under the PCI rescan/remove lock, then
 * tears down the interrupt infrastructure and powers the port off.
 *
 * Return: always 0.
 */
static int mtk_pcie_remove(struct platform_device *pdev)
{
	struct mtk_pcie_port *port = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(port);

	pci_lock_rescan_remove();
	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	pci_unlock_rescan_remove();

	mtk_pcie_irq_teardown(port);
	mtk_pcie_power_down(port);

	return 0;
}
1019
developer44e30b02021-07-02 11:12:14 +08001020static void __maybe_unused mtk_pcie_irq_save(struct mtk_pcie_port *port)
1021{
1022 int i;
1023
1024 raw_spin_lock(&port->irq_lock);
1025
1026 port->saved_irq_state = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
1027
1028 for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
1029 struct mtk_msi_set *msi_set = &port->msi_sets[i];
1030
developer63dcf012021-09-02 10:14:03 +08001031 if (port->direct_msi_enable)
1032 msi_set->saved_irq_state = readl_relaxed(msi_set->base +
1033 PCIE_MSI_SET_GRP1_ENABLE_OFFSET);
1034 else
1035 msi_set->saved_irq_state = readl_relaxed(msi_set->base +
1036 PCIE_MSI_SET_ENABLE_OFFSET);
developer44e30b02021-07-02 11:12:14 +08001037 }
1038
1039 raw_spin_unlock(&port->irq_lock);
1040}
1041
1042static void __maybe_unused mtk_pcie_irq_restore(struct mtk_pcie_port *port)
1043{
1044 int i;
1045
1046 raw_spin_lock(&port->irq_lock);
1047
1048 writel_relaxed(port->saved_irq_state, port->base + PCIE_INT_ENABLE_REG);
1049
1050 for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
1051 struct mtk_msi_set *msi_set = &port->msi_sets[i];
1052
developer63dcf012021-09-02 10:14:03 +08001053 if (port->direct_msi_enable)
1054 writel_relaxed(msi_set->saved_irq_state, msi_set->base +
1055 PCIE_MSI_SET_GRP1_ENABLE_OFFSET);
1056 else
1057 writel_relaxed(msi_set->saved_irq_state, msi_set->base +
1058 PCIE_MSI_SET_ENABLE_OFFSET);
developer44e30b02021-07-02 11:12:14 +08001059 }
1060
1061 raw_spin_unlock(&port->irq_lock);
1062}
1063
developerfd40db22021-04-29 10:08:25 +08001064static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_pcie_port *port)
1065{
1066 u32 val;
1067
developer44e30b02021-07-02 11:12:14 +08001068 val = readl_relaxed(port->base + PCIE_ICMD_PM_REG);
developerfd40db22021-04-29 10:08:25 +08001069 val |= PCIE_TURN_OFF_LINK;
developer44e30b02021-07-02 11:12:14 +08001070 writel_relaxed(val, port->base + PCIE_ICMD_PM_REG);
developerfd40db22021-04-29 10:08:25 +08001071
1072 /* Check the link is L2 */
1073 return readl_poll_timeout(port->base + PCIE_LTSSM_STATUS_REG, val,
1074 (PCIE_LTSSM_STATE(val) ==
1075 PCIE_LTSSM_STATE_L2_IDLE), 20,
1076 50 * USEC_PER_MSEC);
1077}
1078
/*
 * mtk_pcie_suspend_noirq() - noirq-phase system suspend handler
 * @dev: the controller device
 *
 * Forces the link into L2, asserts PERST# toward the endpoint, saves the
 * interrupt-enable state, then powers the port down.  Suspend is aborted
 * if the link refuses to enter L2.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
{
	struct mtk_pcie_port *port = dev_get_drvdata(dev);
	int err;
	u32 val;

	/* Trigger link to L2 state */
	err = mtk_pcie_turn_off_link(port);
	if (err) {
		dev_err(port->dev, "cannot enter L2 state\n");
		return err;
	}

	/* Pull down the PERST# pin */
	val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
	val |= PCIE_PE_RSTB;
	writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);

	dev_dbg(port->dev, "entered L2 states successfully");

	mtk_pcie_irq_save(port);
	mtk_pcie_power_down(port);

	return 0;
}
1104
/*
 * mtk_pcie_resume_noirq() - noirq-phase system resume handler
 * @dev: the controller device
 *
 * Powers the port back up, retrains the link, and restores the saved
 * interrupt-enable state.  A link-training failure powers the port down
 * again before returning the error.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
{
	struct mtk_pcie_port *port = dev_get_drvdata(dev);
	int err;

	err = mtk_pcie_power_up(port);
	if (err)
		return err;

	err = mtk_pcie_startup_port(port);
	if (err) {
		mtk_pcie_power_down(port);
		return err;
	}

	mtk_pcie_irq_restore(port);

	return 0;
}
1124
/* System sleep callbacks, registered for the noirq phase of suspend/resume */
static const struct dev_pm_ops mtk_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				      mtk_pcie_resume_noirq)
};
1129
1130static const struct of_device_id mtk_pcie_of_match[] = {
1131 { .compatible = "mediatek,mt8192-pcie" },
developer44e30b02021-07-02 11:12:14 +08001132 { .compatible = "mediatek,mt7986-pcie" },
developerfd40db22021-04-29 10:08:25 +08001133 {},
1134};
1135
1136static struct platform_driver mtk_pcie_driver = {
1137 .probe = mtk_pcie_probe,
1138 .remove = mtk_pcie_remove,
1139 .driver = {
1140 .name = "mtk-pcie",
1141 .of_match_table = mtk_pcie_of_match,
1142 .pm = &mtk_pcie_pm_ops,
1143 },
1144};
1145
1146module_platform_driver(mtk_pcie_driver);
1147MODULE_LICENSE("GPL v2");