blob: 4012dbabb272af9ef2aaac0ea1f4231c9088619b [file] [log] [blame]
developerfd40db22021-04-29 10:08:25 +08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * MediaTek PCIe host controller driver.
4 *
5 * Copyright (c) 2020 MediaTek Inc.
6 * Author: Jianjun Wang <jianjun.wang@mediatek.com>
7 */
8
9#include <linux/clk.h>
10#include <linux/delay.h>
11#include <linux/iopoll.h>
12#include <linux/irq.h>
13#include <linux/irqchip/chained_irq.h>
14#include <linux/irqdomain.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/msi.h>
developerfd40db22021-04-29 10:08:25 +080018#include <linux/of_pci.h>
developerfd40db22021-04-29 10:08:25 +080019#include <linux/pci.h>
20#include <linux/phy/phy.h>
21#include <linux/platform_device.h>
22#include <linux/pm_domain.h>
23#include <linux/pm_runtime.h>
24#include <linux/reset.h>
25
26#include "../pci.h"
27
/* MAC controller settings and PCI ID registers */
#define PCIE_SETTING_REG		0x80
#define PCIE_PCI_IDS_1			0x9c
/* Class code lives in bits 31:8 of PCIE_PCI_IDS_1; argument parenthesized */
#define PCI_CLASS(class)		((class) << 8)
#define PCIE_RC_MODE			BIT(0)

/* Configuration-request TLP header assembly */
#define PCIE_CFGNUM_REG			0x140
#define PCIE_CFG_DEVFN(devfn)		((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus)		(((bus) << 8) & GENMASK(15, 8))
#define PCIE_CFG_BYTE_EN(bytes)		(((bytes) << 16) & GENMASK(19, 16))
#define PCIE_CFG_FORCE_BYTE_EN		BIT(20)
#define PCIE_CFG_OFFSET_ADDR		0x1000
#define PCIE_CFG_HEADER(bus, devfn) \
	(PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))

/* Reset control: MAC, PHY, bridge and PERST# (PE) lines */
#define PCIE_RST_CTRL_REG		0x148
#define PCIE_MAC_RSTB			BIT(0)
#define PCIE_PHY_RSTB			BIT(1)
#define PCIE_BRG_RSTB			BIT(2)
#define PCIE_PE_RSTB			BIT(3)

/* Link training state machine status */
#define PCIE_LTSSM_STATUS_REG		0x150
#define PCIE_LTSSM_STATE_MASK		GENMASK(28, 24)
#define PCIE_LTSSM_STATE(val)		(((val) & PCIE_LTSSM_STATE_MASK) >> 24)
#define PCIE_LTSSM_STATE_L2_IDLE	0x14

#define PCIE_LINK_STATUS_REG		0x154
#define PCIE_PORT_LINKUP		BIT(8)

/* MSI hardware: 8 sets of 32 vectors each (256 vectors total) */
#define PCIE_MSI_SET_NUM		8
#define PCIE_MSI_IRQS_PER_SET		32
#define PCIE_MSI_IRQS_NUM \
	(PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)

/* Top-level interrupt enable: MSI sets at bits 15:8, INTx at bits 27:24 */
#define PCIE_INT_ENABLE_REG		0x180
#define PCIE_MSI_ENABLE			GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
#define PCIE_MSI_SHIFT			8
#define PCIE_INTX_SHIFT			24
#define PCIE_INTX_ENABLE \
	GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)

#define PCIE_INT_STATUS_REG		0x184
#define PCIE_MSI_SET_ENABLE_REG		0x190
#define PCIE_MSI_SET_ENABLE		GENMASK(PCIE_MSI_SET_NUM - 1, 0)

/* Per-set MSI registers, one bank every PCIE_MSI_SET_OFFSET bytes */
#define PCIE_MSI_SET_BASE_REG		0xc00
#define PCIE_MSI_SET_OFFSET		0x10
#define PCIE_MSI_SET_STATUS_OFFSET	0x04
#define PCIE_MSI_SET_ENABLE_OFFSET	0x08
#define PCIE_MSI_SET_GRP1_ENABLE_OFFSET	0x0c

#define PCIE_MSI_SET_ADDR_HI_BASE	0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET	0x04

/* Power management command register */
#define PCIE_ICMD_PM_REG		0x198
#define PCIE_TURN_OFF_LINK		BIT(4)

/* Address translation (ATR) window registers, one table per window */
#define PCIE_TRANS_TABLE_BASE_REG	0x800
#define PCIE_ATR_SRC_ADDR_MSB_OFFSET	0x4
#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET	0x8
#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET	0xc
#define PCIE_ATR_TRSL_PARAM_OFFSET	0x10
#define PCIE_ATR_TLB_SET_OFFSET		0x20

#define PCIE_MAX_TRANS_TABLES		8
#define PCIE_ATR_EN			BIT(0)
#define PCIE_ATR_SIZE(size) \
	(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
#define PCIE_ATR_ID(id)			((id) & GENMASK(3, 0))
#define PCIE_ATR_TYPE_MEM		PCIE_ATR_ID(0)
#define PCIE_ATR_TYPE_IO		PCIE_ATR_ID(1)
#define PCIE_ATR_TLP_TYPE(type)		(((type) << 16) & GENMASK(18, 16))
#define PCIE_ATR_TLP_TYPE_MEM		PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO		PCIE_ATR_TLP_TYPE(2)
101
/**
 * struct mtk_msi_set - MSI information for each set
 * @base: IO mapped register base of this set's register bank
 * @msg_addr: MSI message address — the physical address of this set's
 *            register bank, used as the MSI capture address that endpoints
 *            write their MSI data to
 * @saved_irq_state: IRQ enable state saved at suspend time
 */
struct mtk_msi_set {
	void __iomem *base;
	phys_addr_t msg_addr;
	u32 saved_irq_state;
};
113
/**
 * struct mtk_pcie_port - PCIe port information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @reg_base: physical register base
 * @mac_reset: MAC reset control
 * @phy_reset: PHY reset control
 * @phy: PHY controller block
 * @clks: PCIe clocks
 * @num_clks: PCIe clocks count for this port
 * @irq: PCIe controller interrupt number
 * @max_link_width: link width limitation read from the "max-link-width"
 *                  devicetree property (1 or 2)
 * @direct_msi_enable: set when the devicetree node carries a "direct_msi"
 *                     property; MSIs are then delivered through dedicated
 *                     per-vector interrupt lines instead of the chained
 *                     port interrupt
 * @direct_msi: Linux interrupt numbers of the dedicated MSI lines, one per
 *              vector index within a set
 * @saved_irq_state: IRQ enable state saved at suspend time
 * @irq_lock: lock protecting IRQ register access
 * @intx_domain: legacy INTx IRQ domain
 * @msi_domain: MSI IRQ domain
 * @msi_bottom_domain: MSI IRQ bottom domain
 * @msi_sets: MSI sets information
 * @lock: lock protecting IRQ bit map
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_pcie_port {
	struct device *dev;
	void __iomem *base;
	phys_addr_t reg_base;
	struct reset_control *mac_reset;
	struct reset_control *phy_reset;
	struct phy *phy;
	struct clk_bulk_data *clks;
	int num_clks;

	int irq;
	int max_link_width;
	int direct_msi_enable;
	int direct_msi[PCIE_MSI_IRQS_PER_SET];
	u32 saved_irq_state;
	raw_spinlock_t irq_lock;
	struct irq_domain *intx_domain;
	struct irq_domain *msi_domain;
	struct irq_domain *msi_bottom_domain;
	struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
};
157
158/**
developer44e30b02021-07-02 11:12:14 +0800159 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
developerfd40db22021-04-29 10:08:25 +0800160 * @bus: PCI bus to query
161 * @devfn: device/function number
162 * @where: offset in config space
163 * @size: data size in TLP header
164 *
165 * Set byte enable field and device information in configuration TLP header.
166 */
167static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
168 int where, int size)
169{
170 struct mtk_pcie_port *port = bus->sysdata;
171 int bytes;
172 u32 val;
173
174 bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);
175
176 val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
177 PCIE_CFG_HEADER(bus->number, devfn);
178
developer44e30b02021-07-02 11:12:14 +0800179 writel_relaxed(val, port->base + PCIE_CFGNUM_REG);
developerfd40db22021-04-29 10:08:25 +0800180}
181
182static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
183 int where)
184{
185 struct mtk_pcie_port *port = bus->sysdata;
186
187 return port->base + PCIE_CFG_OFFSET_ADDR + where;
188}
189
190static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
191 int where, int size, u32 *val)
192{
193 mtk_pcie_config_tlp_header(bus, devfn, where, size);
194
195 return pci_generic_config_read32(bus, devfn, where, size, val);
196}
197
198static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
199 int where, int size, u32 val)
200{
201 mtk_pcie_config_tlp_header(bus, devfn, where, size);
202
203 if (size <= 2)
204 val <<= (where & 0x3) * 8;
205
206 return pci_generic_config_write32(bus, devfn, where, 4, val);
207}
208
/* Config-space accessors handed to the PCI core for this host bridge */
static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};
214
developerca1c6b22023-04-26 19:51:06 +0800215/**
216 * This function will try to find the limitation of link width by finding
217 * a property called "max-link-width" of the given device node.
218 *
219 * @node: device tree node with the max link width information
220 *
221 * Returns the associated max link width from DT, or a negative value if the
222 * required property is not found or is invalid.
223 */
224int of_pci_get_max_link_width(struct device_node *node)
225{
226 u32 max_link_width = 0;
227
228 if (of_property_read_u32(node, "max-link-width", &max_link_width) ||
229 max_link_width == 0 || max_link_width > 2)
230 return -EINVAL;
231
232 return max_link_width;
233}
234
/*
 * mtk_pcie_set_trans_table() - Program one address translation (ATR) window
 * @port: PCIe port information
 * @cpu_addr: CPU-side base address of the window
 * @pci_addr: PCI-side base address the window translates to
 * @size: window size in bytes
 * @type: resource type (IORESOURCE_IO or IORESOURCE_MEM)
 * @num: translation table index to program
 *
 * Returns 0 on success, -ENODEV when all table slots are used.
 */
static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port,
				    resource_size_t cpu_addr,
				    resource_size_t pci_addr,
				    resource_size_t size,
				    unsigned long type, int num)
{
	void __iomem *table;
	u32 val;

	if (num >= PCIE_MAX_TRANS_TABLES) {
		dev_err(port->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
			(unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
		return -ENODEV;
	}

	/* Each table occupies PCIE_ATR_TLB_SET_OFFSET bytes of register space */
	table = port->base + PCIE_TRANS_TABLE_BASE_REG +
		num * PCIE_ATR_TLB_SET_OFFSET;

	/*
	 * NOTE(review): the window size is encoded as log2 via fls(size) - 1,
	 * which presumes @size is a power of two and fits in 32 bits — TODO
	 * confirm behavior for non-power-of-two or >4GiB windows.
	 */
	writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
		       table);
	writel_relaxed(upper_32_bits(cpu_addr),
		       table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
	writel_relaxed(lower_32_bits(pci_addr),
		       table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
	writel_relaxed(upper_32_bits(pci_addr),
		       table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);

	/* Window attribute: translated transaction type on the PCI side */
	if (type == IORESOURCE_IO)
		val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
	else
		val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;

	/* Writing the parameter register last commits the window */
	writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);

	return 0;
}
271
/*
 * mtk_pcie_enable_msi() - Program MSI capture addresses and enable all
 * PCIE_MSI_SET_NUM MSI sets of @port.
 */
static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
{
	int i;
	u32 val;

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &port->msi_sets[i];

		/* Virtual and physical address of this set's register bank */
		msi_set->base = port->base + PCIE_MSI_SET_BASE_REG +
				i * PCIE_MSI_SET_OFFSET;
		msi_set->msg_addr = port->reg_base + PCIE_MSI_SET_BASE_REG +
				    i * PCIE_MSI_SET_OFFSET;

		/* Configure the MSI capture address */
		writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
		writel_relaxed(upper_32_bits(msi_set->msg_addr),
			       port->base + PCIE_MSI_SET_ADDR_HI_BASE +
			       i * PCIE_MSI_SET_ADDR_HI_OFFSET);
	}

	/* Turn on every MSI set */
	val = readl_relaxed(port->base + PCIE_MSI_SET_ENABLE_REG);
	val |= PCIE_MSI_SET_ENABLE;
	writel_relaxed(val, port->base + PCIE_MSI_SET_ENABLE_REG);

	/*
	 * With direct MSI dispatch the dedicated per-vector lines are used,
	 * so the summary MSI interrupts in PCIE_INT_ENABLE_REG stay masked.
	 */
	if (!port->direct_msi_enable) {
		val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
		val |= PCIE_MSI_ENABLE;
		writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
	}
}
302
/*
 * mtk_pcie_startup_port() - Bring up the PCIe link and configure the port:
 * RC mode, link width, class code, reset/PERST# sequencing, link-up poll,
 * MSI enablement and address translation windows.
 *
 * Returns 0 on success or a negative errno when the link does not come up
 * or a translation window cannot be programmed.
 */
static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
	struct resource_entry *entry;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
	unsigned int table_index = 0;
	int err;
	u32 val;

	/* Set as RC mode */
	val = readl_relaxed(port->base + PCIE_SETTING_REG);
	val |= PCIE_RC_MODE;
	writel_relaxed(val, port->base + PCIE_SETTING_REG);

	/*
	 * Set link width: bits 11:8 of PCIE_SETTING_REG select the lane
	 * configuration (cleared = x1, BIT(8) = x2); other values of
	 * max_link_width leave the register untouched.
	 */
	val = readl_relaxed(port->base + PCIE_SETTING_REG);
	if (port->max_link_width == 1) {
		val &= ~GENMASK(11, 8);
	} else if (port->max_link_width == 2) {
		val &= ~GENMASK(11, 8);
		val |= BIT(8);
	}
	writel_relaxed(val, port->base + PCIE_SETTING_REG);

	/* Set class code */
	val = readl_relaxed(port->base + PCIE_PCI_IDS_1);
	val &= ~GENMASK(31, 8);
	val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8);
	writel_relaxed(val, port->base + PCIE_PCI_IDS_1);

	/* Mask all INTx interrupts */
	val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
	val &= ~PCIE_INTX_ENABLE;
	writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);

	/* Assert all reset signals */
	val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
	val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
	writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);

	/*
	 * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
	 * and 2.2.1 (Initial Power-Up (G3 to S0)).
	 * The deassertion of PERST# should be delayed 100ms (TPVPERL)
	 * for the power and clock to become stable.
	 */
	msleep(100);

	/* De-assert reset signals */
	val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
	writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);

	/* Check if the link is up or not */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_REG, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
	if (err) {
		val = readl_relaxed(port->base + PCIE_LTSSM_STATUS_REG);
		dev_err(port->dev, "PCIe link down, ltssm reg val: %#x\n", val);
		return err;
	}

	mtk_pcie_enable_msi(port);

	/* Set PCIe translation windows */
	resource_list_for_each_entry(entry, &host->windows) {
		struct resource *res = entry->res;
		unsigned long type = resource_type(res);
		resource_size_t cpu_addr;
		resource_size_t pci_addr;
		resource_size_t size;
		const char *range_type;

		if (type == IORESOURCE_IO) {
			cpu_addr = pci_pio_to_address(res->start);
			range_type = "IO";
		} else if (type == IORESOURCE_MEM) {
			cpu_addr = res->start;
			range_type = "MEM";
		} else {
			continue;
		}

		pci_addr = res->start - entry->offset;
		size = resource_size(res);
		err = mtk_pcie_set_trans_table(port, cpu_addr, pci_addr, size,
					       type, table_index);
		if (err)
			return err;

		dev_dbg(port->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
			range_type, table_index, (unsigned long long)cpu_addr,
			(unsigned long long)pci_addr, (unsigned long long)size);

		table_index++;
	}

	return 0;
}
401
/*
 * mtk_pcie_set_msi_affinity() - Set affinity of a direct-dispatch MSI by
 * forwarding the request to the dedicated interrupt line that carries this
 * MSI vector index (all sets share one line per vector bit).
 */
static int mtk_pcie_set_msi_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	struct mtk_pcie_port *port = data->domain->host_data;
	struct irq_data *port_data;
	struct irq_chip *port_chip;
	int msi_bit, irq, ret;

	/* Vector index within a set selects the dedicated line */
	msi_bit = data->hwirq % PCIE_MSI_IRQS_PER_SET;
	irq = port->direct_msi[msi_bit];

	port_data = irq_get_irq_data(irq);
	port_chip = irq_data_get_irq_chip(port_data);
	if (!port_chip || !port_chip->irq_set_affinity)
		return -EINVAL;

	ret = port_chip->irq_set_affinity(port_data, mask, force);

	irq_data_update_effective_affinity(data, mask);

	return ret;
}
424
/* No per-CPU affinity control for INTx/bottom-domain interrupts */
static int mtk_pcie_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
developerfd40db22021-04-29 10:08:25 +0800430
/* Mask at both the PCI MSI capability and the parent (bottom) domain */
static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
	pci_msi_mask_irq(data);
	irq_chip_mask_parent(data);
}
developerfd40db22021-04-29 10:08:25 +0800436
/* Unmask at both the PCI MSI capability and the parent (bottom) domain */
static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
	irq_chip_unmask_parent(data);
}
developerfd40db22021-04-29 10:08:25 +0800442
/* Top-level MSI chip presented to PCI devices */
static struct irq_chip mtk_msi_irq_chip = {
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = mtk_pcie_msi_irq_mask,
	.irq_unmask = mtk_pcie_msi_irq_unmask,
	.name = "MSI",
};
developerfd40db22021-04-29 10:08:25 +0800449
/* MSI domain capabilities: MSI-X and multi-vector MSI supported */
static struct msi_domain_info mtk_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip = &mtk_msi_irq_chip,
};
developerfd40db22021-04-29 10:08:25 +0800455
456static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
457{
developer44e30b02021-07-02 11:12:14 +0800458 struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
459 struct mtk_pcie_port *port = data->domain->host_data;
developerfd40db22021-04-29 10:08:25 +0800460 unsigned long hwirq;
461
developerfd40db22021-04-29 10:08:25 +0800462 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
463
developer44e30b02021-07-02 11:12:14 +0800464 msg->address_hi = upper_32_bits(msi_set->msg_addr);
465 msg->address_lo = lower_32_bits(msi_set->msg_addr);
developerfd40db22021-04-29 10:08:25 +0800466 msg->data = hwirq;
467 dev_dbg(port->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
468 hwirq, msg->address_hi, msg->address_lo, msg->data);
469}
470
/* Ack an MSI by writing its bit to the set's write-1-to-clear status reg */
static void mtk_msi_bottom_irq_ack(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	/* Vector index within this 32-vector MSI set */
	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
}
480
/*
 * Mask one MSI vector: clear its enable bit under irq_lock. Direct-dispatch
 * ports use the group-1 enable register, otherwise the normal enable reg.
 */
static void mtk_msi_bottom_irq_mask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_pcie_port *port = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	/* Vector index within this MSI set */
	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	/* irq_lock serializes the read-modify-write of the enable register */
	raw_spin_lock_irqsave(&port->irq_lock, flags);
	if (port->direct_msi_enable) {
		val = readl_relaxed(msi_set->base +
				    PCIE_MSI_SET_GRP1_ENABLE_OFFSET);
		val &= ~BIT(hwirq);
		writel_relaxed(val, msi_set->base +
			       PCIE_MSI_SET_GRP1_ENABLE_OFFSET);
	} else {
		val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
		val &= ~BIT(hwirq);
		writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	}
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}
504
/*
 * Unmask one MSI vector: set its enable bit under irq_lock. Direct-dispatch
 * ports use the group-1 enable register, otherwise the normal enable reg.
 */
static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_pcie_port *port = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	/* Vector index within this MSI set */
	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	/* irq_lock serializes the read-modify-write of the enable register */
	raw_spin_lock_irqsave(&port->irq_lock, flags);
	if (port->direct_msi_enable) {
		val = readl_relaxed(msi_set->base +
				    PCIE_MSI_SET_GRP1_ENABLE_OFFSET);
		val |= BIT(hwirq);
		writel_relaxed(val, msi_set->base +
			       PCIE_MSI_SET_GRP1_ENABLE_OFFSET);
	} else {
		val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
		val |= BIT(hwirq);
		writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	}
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}
528
/* Chip for the hardware-level (bottom) MSI domain */
static struct irq_chip mtk_msi_bottom_irq_chip = {
	.irq_ack = mtk_msi_bottom_irq_ack,
	.irq_mask = mtk_msi_bottom_irq_mask,
	.irq_unmask = mtk_msi_bottom_irq_unmask,
	.irq_compose_msi_msg = mtk_compose_msi_msg,
	.irq_set_affinity = mtk_pcie_set_affinity,
	.name = "MSI",
};
537
/*
 * Allocate a power-of-two aligned block of @nr_irqs MSI hwirqs from the
 * bitmap and bind each to the MSI set the block falls into.
 */
static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *arg)
{
	struct mtk_pcie_port *port = domain->host_data;
	struct mtk_msi_set *msi_set;
	int i, hwirq, set_idx;

	mutex_lock(&port->lock);

	hwirq = bitmap_find_free_region(port->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
					order_base_2(nr_irqs));

	mutex_unlock(&port->lock);

	if (hwirq < 0)
		return -ENOSPC;

	/* All hwirqs of the block live in the same 32-vector set */
	set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
	msi_set = &port->msi_sets[set_idx];

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &mtk_msi_bottom_irq_chip, msi_set,
				    handle_edge_irq, NULL, NULL);

	return 0;
}
566
/* Return a block of MSI hwirqs to the bitmap and free the descriptors */
static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs)
{
	struct mtk_pcie_port *port = domain->host_data;
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);

	mutex_lock(&port->lock);

	bitmap_release_region(port->msi_irq_in_use, data->hwirq,
			      order_base_2(nr_irqs));

	mutex_unlock(&port->lock);

	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}
582
/* Ops for the hardware-level (bottom) MSI domain */
static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
	.alloc = mtk_msi_bottom_domain_alloc,
	.free = mtk_msi_bottom_domain_free,
};
587
/* Mask one INTx line: clear its enable bit under irq_lock */
static void mtk_intx_mask(struct irq_data *data)
{
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&port->irq_lock, flags);
	val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
	val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}
600
/* Unmask one INTx line: set its enable bit under irq_lock */
static void mtk_intx_unmask(struct irq_data *data)
{
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&port->irq_lock, flags);
	val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
	val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}
613
/**
 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
 * @data: pointer to chip specific data
 *
 * As an emulated level IRQ, its interrupt status will remain
 * until the corresponding de-assert message is received; hence that
 * the status can only be cleared when the interrupt has been serviced.
 */
static void mtk_intx_eoi(struct irq_data *data)
{
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	/* INTx status bits start at PCIE_INTX_SHIFT; write-1-to-clear */
	hwirq = data->hwirq + PCIE_INTX_SHIFT;
	writel_relaxed(BIT(hwirq), port->base + PCIE_INT_STATUS_REG);
}
630
/* Chip for the emulated level-type legacy INTx interrupts */
static struct irq_chip mtk_intx_irq_chip = {
	.irq_mask = mtk_intx_mask,
	.irq_unmask = mtk_intx_unmask,
	.irq_eoi = mtk_intx_eoi,
	.irq_set_affinity = mtk_pcie_set_affinity,
	.name = "INTx",
};
638
/* Bind a virq in the INTx domain to the INTx chip with fasteoi flow */
static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, domain->host_data);
	irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
				      handle_fasteoi_irq, "INTx");
	return 0;
}
647
/* Ops for the legacy INTx IRQ domain */
static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};
651
developer44e30b02021-07-02 11:12:14 +0800652static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port)
developerfd40db22021-04-29 10:08:25 +0800653{
654 struct device *dev = port->dev;
developer44e30b02021-07-02 11:12:14 +0800655 struct device_node *intc_node, *node = dev->of_node;
656 int ret;
657
658 raw_spin_lock_init(&port->irq_lock);
developerfd40db22021-04-29 10:08:25 +0800659
660 /* Setup INTx */
661 intc_node = of_get_child_by_name(node, "interrupt-controller");
662 if (!intc_node) {
developer44e30b02021-07-02 11:12:14 +0800663 dev_err(dev, "missing interrupt-controller node\n");
developerfd40db22021-04-29 10:08:25 +0800664 return -ENODEV;
665 }
666
667 port->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
668 &intx_domain_ops, port);
669 if (!port->intx_domain) {
developer44e30b02021-07-02 11:12:14 +0800670 dev_err(dev, "failed to create INTx IRQ domain\n");
developerfd40db22021-04-29 10:08:25 +0800671 return -ENODEV;
672 }
673
674 /* Setup MSI */
675 mutex_init(&port->lock);
676
developer44e30b02021-07-02 11:12:14 +0800677 port->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
678 &mtk_msi_bottom_domain_ops, port);
679 if (!port->msi_bottom_domain) {
680 dev_err(dev, "failed to create MSI bottom domain\n");
developerfd40db22021-04-29 10:08:25 +0800681 ret = -ENODEV;
developer44e30b02021-07-02 11:12:14 +0800682 goto err_msi_bottom_domain;
developerfd40db22021-04-29 10:08:25 +0800683 }
684
developer44e30b02021-07-02 11:12:14 +0800685 port->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
686 &mtk_msi_domain_info,
687 port->msi_bottom_domain);
688 if (!port->msi_domain) {
689 dev_err(dev, "failed to create MSI domain\n");
developerfd40db22021-04-29 10:08:25 +0800690 ret = -ENODEV;
developer44e30b02021-07-02 11:12:14 +0800691 goto err_msi_domain;
developerfd40db22021-04-29 10:08:25 +0800692 }
693
developer63dcf012021-09-02 10:14:03 +0800694 if (of_find_property(node, "direct_msi", NULL))
695 port->direct_msi_enable = true;
696 else
697 port->direct_msi_enable = false;
698
developerfd40db22021-04-29 10:08:25 +0800699 return 0;
700
developerfd40db22021-04-29 10:08:25 +0800701err_msi_domain:
developer44e30b02021-07-02 11:12:14 +0800702 irq_domain_remove(port->msi_bottom_domain);
703err_msi_bottom_domain:
developerfd40db22021-04-29 10:08:25 +0800704 irq_domain_remove(port->intx_domain);
705
706 return ret;
707}
708
/* Detach the chained handler and remove every IRQ domain of @port */
static void mtk_pcie_irq_teardown(struct mtk_pcie_port *port)
{
	irq_set_chained_handler_and_data(port->irq, NULL, NULL);

	if (port->intx_domain)
		irq_domain_remove(port->intx_domain);

	if (port->msi_domain)
		irq_domain_remove(port->msi_domain);

	if (port->msi_bottom_domain)
		irq_domain_remove(port->msi_bottom_domain);

	irq_dispose_mapping(port->irq);
}
724
/*
 * mtk_pcie_msi_handler() - Dispatch every pending, enabled MSI of one set.
 * Loops until the masked status reads back empty, so vectors raised while
 * handling are also drained.
 */
static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx)
{
	struct mtk_msi_set *msi_set = &port->msi_sets[set_idx];
	unsigned long msi_enable, msi_status;
	unsigned int virq;
	irq_hw_number_t bit, hwirq;

	msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);

	do {
		msi_status = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_STATUS_OFFSET);
		/* Only consider vectors that are currently enabled */
		msi_status &= msi_enable;
		if (!msi_status)
			break;

		for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
			hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
			virq = irq_find_mapping(port->msi_bottom_domain, hwirq);
			generic_handle_irq(virq);
		}
	} while (true);
}
748
/*
 * Chained handler for the port interrupt: dispatches pending INTx lines,
 * then the per-set MSI summary interrupts.
 */
static void mtk_pcie_irq_handler(struct irq_desc *desc)
{
	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	unsigned int virq;
	irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;

	chained_irq_enter(irqchip, desc);

	/* INTx status occupies bits 27:24 of the status register */
	status = readl_relaxed(port->base + PCIE_INT_STATUS_REG);
	for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
			      PCIE_INTX_SHIFT) {
		virq = irq_find_mapping(port->intx_domain,
					irq_bit - PCIE_INTX_SHIFT);
		generic_handle_irq(virq);
	}

	/* MSI set summary bits occupy bits 15:8; ack after draining a set */
	irq_bit = PCIE_MSI_SHIFT;
	for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
			      PCIE_MSI_SHIFT) {
		mtk_pcie_msi_handler(port, irq_bit - PCIE_MSI_SHIFT);

		writel_relaxed(BIT(irq_bit), port->base + PCIE_INT_STATUS_REG);
	}

	chained_irq_exit(irqchip, desc);
}
777
/*
 * Chained handler for "direct MSI" mode, where each parent interrupt line
 * corresponds to one bit position shared across all PCIE_MSI_SET_NUM sets:
 * find which line fired, then service that bit in every set.
 */
static void mtk_pcie_direct_msi_handler(struct irq_desc *desc)
{
	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long msi_enable, msi_status;
	unsigned int virq;
	irq_hw_number_t hwirq;
	int i, msi_bit = -EINVAL;	/* sentinel: no matching parent line */

	/* Map the incoming Linux IRQ back to its per-set bit position */
	for (i = 0; i < PCIE_MSI_IRQS_PER_SET; i++) {
		if (port->direct_msi[i] == irq_desc_get_irq(desc)) {
			msi_bit = i;
			break;
		}
	}

	/*
	 * NOTE(review): this early return happens before chained_irq_enter(),
	 * so the parent interrupt is not acked/eoi'd on this path — confirm it
	 * is only reachable for a line that was never registered.
	 */
	if (msi_bit == -EINVAL)
		return;

	chained_irq_enter(irqchip, desc);

	/* Check this bit position in every set; gated by the GRP1 enables */
	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &port->msi_sets[i];

		msi_status = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_STATUS_OFFSET);
		msi_enable = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_GRP1_ENABLE_OFFSET);
		msi_status &= msi_enable;
		msi_status &= BIT(msi_bit);
		if (!msi_status)
			continue;

		hwirq = msi_bit + i * PCIE_MSI_IRQS_PER_SET;
		virq = irq_find_mapping(port->msi_bottom_domain, hwirq);
		generic_handle_irq(virq);
	}

	chained_irq_exit(irqchip, desc);
}
818
developer44e30b02021-07-02 11:12:14 +0800819static int mtk_pcie_setup_irq(struct mtk_pcie_port *port)
developerfd40db22021-04-29 10:08:25 +0800820{
821 struct device *dev = port->dev;
822 struct platform_device *pdev = to_platform_device(dev);
developer63dcf012021-09-02 10:14:03 +0800823 int err, i;
developerfd40db22021-04-29 10:08:25 +0800824
developer44e30b02021-07-02 11:12:14 +0800825 err = mtk_pcie_init_irq_domains(port);
826 if (err)
developerfd40db22021-04-29 10:08:25 +0800827 return err;
developerfd40db22021-04-29 10:08:25 +0800828
829 port->irq = platform_get_irq(pdev, 0);
830 if (port->irq < 0)
831 return port->irq;
832
833 irq_set_chained_handler_and_data(port->irq, mtk_pcie_irq_handler, port);
834
developer63dcf012021-09-02 10:14:03 +0800835 if (port->direct_msi_enable) {
836 mtk_msi_bottom_irq_chip.irq_set_affinity =
837 mtk_pcie_set_msi_affinity;
838
839 for (i = 0; i < PCIE_MSI_IRQS_PER_SET; i++) {
840 port->direct_msi[i] = platform_get_irq(pdev, i + 1);
841 irq_set_chained_handler_and_data(port->direct_msi[i],
842 mtk_pcie_direct_msi_handler, port);
843 }
844 }
845
developerfd40db22021-04-29 10:08:25 +0800846 return 0;
847}
848
/*
 * Parse DT/platform resources for the port: PCI ranges, the "pcie-mac"
 * register window, optional PHY/MAC resets, the PHY, bus clocks and the
 * maximum link width.  All acquisitions are devm-managed.
 *
 * Returns 0 on success or a negative errno (-EPROBE_DEFER is propagated
 * silently for the optional resets/PHY).
 */
static int mtk_pcie_parse_port(struct mtk_pcie_port *port)
{
	struct device *dev = port->dev;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
	struct platform_device *pdev = to_platform_device(dev);
	struct list_head *windows = &host->windows;
	struct resource *regs, *bus;
	int ret;

	ret = pci_parse_request_of_pci_ranges(dev, windows, &bus);
	if (ret) {
		dev_err(dev, "failed to parse pci ranges\n");
		return ret;
	}

	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
	/* presumably devm_ioremap_resource() rejects a NULL regs, so regs is
	 * valid once we pass the IS_ERR() check below — TODO confirm */
	port->base = devm_ioremap_resource(dev, regs);
	if (IS_ERR(port->base)) {
		dev_err(dev, "failed to map register base\n");
		return PTR_ERR(port->base);
	}

	port->reg_base = regs->start;

	/* Resets are optional: a missing reset yields a NULL (no-op) handle */
	port->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
	if (IS_ERR(port->phy_reset)) {
		ret = PTR_ERR(port->phy_reset);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get PHY reset\n");

		return ret;
	}

	port->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
	if (IS_ERR(port->mac_reset)) {
		ret = PTR_ERR(port->mac_reset);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get MAC reset\n");

		return ret;
	}

	port->phy = devm_phy_optional_get(dev, "pcie-phy");
	if (IS_ERR(port->phy)) {
		ret = PTR_ERR(port->phy);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get PHY\n");

		return ret;
	}

	port->num_clks = devm_clk_bulk_get_all(dev, &port->clks);
	if (port->num_clks < 0) {
		dev_err(dev, "failed to get clocks\n");
		return port->num_clks;
	}

	/* A bad/missing max link width is logged but deliberately non-fatal */
	port->max_link_width = of_pci_get_max_link_width(dev->of_node);
	if (port->max_link_width < 0)
		dev_err(dev, "failed to get max link width\n");

	return 0;
}
912
/*
 * Power the port up.  The sequence is ordered: release the PHY reset and
 * bring the PHY up first, then release the MAC reset, enable runtime PM
 * and finally the bus clocks.  The error path unwinds in exact reverse.
 *
 * Returns 0 on success or a negative errno.
 */
static int mtk_pcie_power_up(struct mtk_pcie_port *port)
{
	struct device *dev = port->dev;
	int err;

	/* PHY power on and enable pipe clock */
	reset_control_deassert(port->phy_reset);

	err = phy_init(port->phy);
	if (err) {
		dev_err(dev, "failed to initialize PHY\n");
		goto err_phy_init;
	}

	err = phy_power_on(port->phy);
	if (err) {
		dev_err(dev, "failed to power on PHY\n");
		goto err_phy_on;
	}

	/* MAC power on and enable transaction layer clocks */
	reset_control_deassert(port->mac_reset);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	err = clk_bulk_prepare_enable(port->num_clks, port->clks);
	if (err) {
		dev_err(dev, "failed to enable clocks\n");
		goto err_clk_init;
	}

	return 0;

err_clk_init:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	reset_control_assert(port->mac_reset);
	phy_power_off(port->phy);
err_phy_on:
	phy_exit(port->phy);
err_phy_init:
	reset_control_assert(port->phy_reset);

	return err;
}
959
/* Power the port down, undoing mtk_pcie_power_up() in exact reverse order. */
static void mtk_pcie_power_down(struct mtk_pcie_port *port)
{
	clk_bulk_disable_unprepare(port->num_clks, port->clks);

	pm_runtime_put_sync(port->dev);
	pm_runtime_disable(port->dev);
	reset_control_assert(port->mac_reset);

	phy_power_off(port->phy);
	phy_exit(port->phy);
	reset_control_assert(port->phy_reset);
}
972
/*
 * Full port bring-up: parse resources, power up the hardware, attempt link
 * training and install interrupt handling.  On any failure after power-up
 * the port is powered back down.
 *
 * Returns 0 on success or a negative errno.
 */
static int mtk_pcie_setup(struct mtk_pcie_port *port)
{
	int ret;

	ret = mtk_pcie_parse_port(port);
	if (ret)
		return ret;

	/* Don't touch the hardware registers before power up */
	ret = mtk_pcie_power_up(port);
	if (ret)
		return ret;

	/* Try link up, then hook up interrupts */
	ret = mtk_pcie_startup_port(port);
	if (!ret)
		ret = mtk_pcie_setup_irq(port);

	if (ret)
		mtk_pcie_power_down(port);

	return ret;
}
1002
developerfd40db22021-04-29 10:08:25 +08001003static int mtk_pcie_probe(struct platform_device *pdev)
1004{
1005 struct device *dev = &pdev->dev;
1006 struct mtk_pcie_port *port;
1007 struct pci_host_bridge *host;
1008 int err;
1009
1010 host = devm_pci_alloc_host_bridge(dev, sizeof(*port));
1011 if (!host)
1012 return -ENOMEM;
1013
1014 port = pci_host_bridge_priv(host);
1015
1016 port->dev = dev;
1017 platform_set_drvdata(pdev, port);
1018
1019 err = mtk_pcie_setup(port);
1020 if (err)
developer44e30b02021-07-02 11:12:14 +08001021 return err;
developerfd40db22021-04-29 10:08:25 +08001022
developerfd40db22021-04-29 10:08:25 +08001023 host->dev.parent = port->dev;
developer44e30b02021-07-02 11:12:14 +08001024 host->ops = &mtk_pcie_ops;
developerfd40db22021-04-29 10:08:25 +08001025 host->map_irq = of_irq_parse_and_map_pci;
1026 host->swizzle_irq = pci_common_swizzle;
developerfd40db22021-04-29 10:08:25 +08001027 host->sysdata = port;
1028
1029 err = pci_host_probe(host);
1030 if (err) {
1031 mtk_pcie_irq_teardown(port);
1032 mtk_pcie_power_down(port);
developer44e30b02021-07-02 11:12:14 +08001033 return err;
developerfd40db22021-04-29 10:08:25 +08001034 }
1035
1036 return 0;
developerfd40db22021-04-29 10:08:25 +08001037}
1038
1039static int mtk_pcie_remove(struct platform_device *pdev)
1040{
1041 struct mtk_pcie_port *port = platform_get_drvdata(pdev);
1042 struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
1043
1044 pci_lock_rescan_remove();
1045 pci_stop_root_bus(host->bus);
1046 pci_remove_root_bus(host->bus);
1047 pci_unlock_rescan_remove();
1048
1049 mtk_pcie_irq_teardown(port);
1050 mtk_pcie_power_down(port);
1051
1052 return 0;
1053}
1054
/*
 * Snapshot the interrupt-enable state (port-level enables plus each MSI
 * set's enables) under irq_lock, for restoring after resume.  In direct-MSI
 * mode the GRP1 enable register is the one in use, so that is what gets
 * saved.
 */
static void __maybe_unused mtk_pcie_irq_save(struct mtk_pcie_port *port)
{
	int i;

	raw_spin_lock(&port->irq_lock);

	port->saved_irq_state = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &port->msi_sets[i];

		if (port->direct_msi_enable)
			msi_set->saved_irq_state = readl_relaxed(msi_set->base +
						   PCIE_MSI_SET_GRP1_ENABLE_OFFSET);
		else
			msi_set->saved_irq_state = readl_relaxed(msi_set->base +
						   PCIE_MSI_SET_ENABLE_OFFSET);
	}

	raw_spin_unlock(&port->irq_lock);
}
1076
/*
 * Restore the interrupt-enable state captured by mtk_pcie_irq_save(),
 * writing back to the same registers it read (GRP1 enables in direct-MSI
 * mode, the plain enables otherwise), under irq_lock.
 */
static void __maybe_unused mtk_pcie_irq_restore(struct mtk_pcie_port *port)
{
	int i;

	raw_spin_lock(&port->irq_lock);

	writel_relaxed(port->saved_irq_state, port->base + PCIE_INT_ENABLE_REG);

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &port->msi_sets[i];

		if (port->direct_msi_enable)
			writel_relaxed(msi_set->saved_irq_state, msi_set->base +
				       PCIE_MSI_SET_GRP1_ENABLE_OFFSET);
		else
			writel_relaxed(msi_set->saved_irq_state, msi_set->base +
				       PCIE_MSI_SET_ENABLE_OFFSET);
	}

	raw_spin_unlock(&port->irq_lock);
}
1098
/*
 * Request a link transition to L2 by setting PCIE_TURN_OFF_LINK, then poll
 * the LTSSM state (20us interval, 50ms timeout) until it reports L2 idle.
 *
 * Returns 0 when L2 idle is reached, or the readl_poll_timeout() errno.
 */
static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_pcie_port *port)
{
	u32 val;

	val = readl_relaxed(port->base + PCIE_ICMD_PM_REG);
	val |= PCIE_TURN_OFF_LINK;
	writel_relaxed(val, port->base + PCIE_ICMD_PM_REG);

	/* Check the link is L2 */
	return readl_poll_timeout(port->base + PCIE_LTSSM_STATUS_REG, val,
				  (PCIE_LTSSM_STATE(val) ==
				   PCIE_LTSSM_STATE_L2_IDLE), 20,
				   50 * USEC_PER_MSEC);
}
1113
1114static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
1115{
1116 struct mtk_pcie_port *port = dev_get_drvdata(dev);
1117 int err;
1118 u32 val;
1119
1120 /* Trigger link to L2 state */
1121 err = mtk_pcie_turn_off_link(port);
1122 if (err) {
developer44e30b02021-07-02 11:12:14 +08001123 dev_err(port->dev, "cannot enter L2 state\n");
developerfd40db22021-04-29 10:08:25 +08001124 return err;
1125 }
1126
1127 /* Pull down the PERST# pin */
developer44e30b02021-07-02 11:12:14 +08001128 val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
developerfd40db22021-04-29 10:08:25 +08001129 val |= PCIE_PE_RSTB;
developer44e30b02021-07-02 11:12:14 +08001130 writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
developerfd40db22021-04-29 10:08:25 +08001131
developer44e30b02021-07-02 11:12:14 +08001132 dev_dbg(port->dev, "entered L2 states successfully");
developerfd40db22021-04-29 10:08:25 +08001133
developer44e30b02021-07-02 11:12:14 +08001134 mtk_pcie_irq_save(port);
1135 mtk_pcie_power_down(port);
developerfd40db22021-04-29 10:08:25 +08001136
1137 return 0;
1138}
1139
1140static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
1141{
1142 struct mtk_pcie_port *port = dev_get_drvdata(dev);
1143 int err;
1144
developer44e30b02021-07-02 11:12:14 +08001145 err = mtk_pcie_power_up(port);
1146 if (err)
developerfd40db22021-04-29 10:08:25 +08001147 return err;
developerfd40db22021-04-29 10:08:25 +08001148
1149 err = mtk_pcie_startup_port(port);
1150 if (err) {
developer44e30b02021-07-02 11:12:14 +08001151 mtk_pcie_power_down(port);
developerfd40db22021-04-29 10:08:25 +08001152 return err;
1153 }
1154
developer44e30b02021-07-02 11:12:14 +08001155 mtk_pcie_irq_restore(port);
developerfd40db22021-04-29 10:08:25 +08001156
1157 return 0;
1158}
1159
/* System sleep handled in the noirq phase (see the *_noirq callbacks above) */
static const struct dev_pm_ops mtk_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				      mtk_pcie_resume_noirq)
};
1164
1165static const struct of_device_id mtk_pcie_of_match[] = {
1166 { .compatible = "mediatek,mt8192-pcie" },
developer44e30b02021-07-02 11:12:14 +08001167 { .compatible = "mediatek,mt7986-pcie" },
developerfd40db22021-04-29 10:08:25 +08001168 {},
1169};
1170
/* Platform driver glue: probe/remove plus PM ops and the OF match table */
static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.remove = mtk_pcie_remove,
	.driver = {
		.name = "mtk-pcie",
		.of_match_table = mtk_pcie_of_match,
		.pm = &mtk_pcie_pm_ops,
	},
};

module_platform_driver(mtk_pcie_driver);
MODULE_LICENSE("GPL v2");