/*
 * Copyright (c) 2020-2022, MediaTek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/arm/gic_common.h>
#include <lib/mmio.h>

#include <mt_cirq.h>
#include <mt_gic_v3.h>

static struct cirq_events cirq_all_events = {
	.spi_start = CIRQ_SPI_START,
};
static uint32_t already_cloned;
/*
 * mt_irq_mask_restore: restore all interrupts
 * @mask: pointer to struct mtk_irq_mask holding the mask values to restore.
 * Return 0 for success; return negative values for failure.
 * (This is ONLY used for the idle current measurement by the factory mode.)
 */
int mt_irq_mask_restore(struct mtk_irq_mask *mask)
{
	if (mask == NULL) {
		return -1;
	}
	if (mask->header != IRQ_MASK_HEADER) {
		return -1;
	}
	if (mask->footer != IRQ_MASK_FOOTER) {
		return -1;
	}

	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x4),
		mask->mask1);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x8),
		mask->mask2);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0xc),
		mask->mask3);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x10),
		mask->mask4);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x14),
		mask->mask5);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x18),
		mask->mask6);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x1c),
		mask->mask7);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x20),
		mask->mask8);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x24),
		mask->mask9);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x28),
		mask->mask10);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x2c),
		mask->mask11);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x30),
		mask->mask12);
	/* make sure distributor changes happen */
	dsb();

	return 0;
}

/*
 * mt_irq_mask_all: disable all interrupts
 * @mask: pointer to struct mtk_irq_mask for storing the original mask value.
 * Return 0 for success; return negative values for failure.
 * (This is ONLY used for the idle current measurement by the factory mode.)
 */
int mt_irq_mask_all(struct mtk_irq_mask *mask)
{
	if (mask != NULL) {
		/* save the SPI enable state */
		mask->mask1 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x4));
		mask->mask2 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x8));
		mask->mask3 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0xc));
		mask->mask4 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x10));
		mask->mask5 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x14));
		mask->mask6 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x18));
		mask->mask7 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x1c));
		mask->mask8 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x20));
		mask->mask9 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x24));
		mask->mask10 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x28));
		mask->mask11 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x2c));
		mask->mask12 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x30));

		/* disable all SPIs */
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x4),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x8),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0xC),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x10),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x14),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x18),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x1C),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x20),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x24),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x28),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x2c),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x30),
			0xFFFFFFFF);
		/* make sure distributor changes happen */
		dsb();

		mask->header = IRQ_MASK_HEADER;
		mask->footer = IRQ_MASK_FOOTER;

		return 0;
	} else {
		return -1;
	}
}
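
/*
 * Illustrative usage sketch (an assumption, not part of the original
 * driver): the two helpers above are meant to be used as a pair by the
 * caller that performs the idle current measurement, e.g.:
 *
 *	static struct mtk_irq_mask saved_mask;
 *
 *	if (mt_irq_mask_all(&saved_mask) == 0) {
 *		// all SPIs disabled, original enable state saved
 *		// ... run the measurement / low-power sequence ...
 *		mt_irq_mask_restore(&saved_mask);
 *	}
 *
 * "saved_mask" is a hypothetical caller-side variable; the header/footer
 * fields are filled in by mt_irq_mask_all() and verified on restore.
 */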

static uint32_t mt_irq_get_pol(uint32_t irq)
{
#ifdef CIRQ_WITH_POLARITY
	uint32_t reg;
	uint32_t base = INT_POL_CTL0;

	if (irq < 32U) {
		return 0;
	}

	reg = ((irq - 32U) / 32U);

	return mmio_read_32(base + reg * 4U);
#else
	return 0;
#endif
}

unsigned int mt_irq_get_sens(unsigned int irq)
{
	unsigned int config;

	/*
	 * 2'b10 edge
	 * 2'b01 level
	 */
	config = mmio_read_32(MT_GIC_BASE + GICD_ICFGR + (irq / 16U) * 4U);
	config = (config >> (irq % 16U) * 2U) & 0x3;

	return config;
}

static void collect_all_wakeup_events(void)
{
	unsigned int i;
	uint32_t gic_irq;
	uint32_t cirq;
	uint32_t cirq_reg;
	uint32_t cirq_offset;
	uint32_t mask;
	uint32_t pol_mask;
	uint32_t irq_offset;
	uint32_t irq_mask;

	if ((cirq_all_events.wakeup_events == NULL) ||
	    cirq_all_events.num_of_events == 0U) {
		return;
	}

	for (i = 0U; i < cirq_all_events.num_of_events; i++) {
		if (cirq_all_events.wakeup_events[i] > 0U) {
			gic_irq = cirq_all_events.wakeup_events[i];
			cirq = gic_irq - cirq_all_events.spi_start - 32U;
			cirq_reg = cirq / 32U;
			cirq_offset = cirq % 32U;
			mask = 0x1 << cirq_offset;
			irq_offset = gic_irq % 32U;
			irq_mask = 0x1 << irq_offset;
			/*
			 * CIRQ masks all interrupts by default
			 */
			cirq_all_events.table[cirq_reg].mask |= mask;
			/*
			 * CIRQ default polarity is low
			 */
			pol_mask = mt_irq_get_pol(
					cirq_all_events.wakeup_events[i])
					& irq_mask;
			/*
			 * 0 means rising
			 */
			if (pol_mask == 0U) {
				cirq_all_events.table[cirq_reg].pol |= mask;
			}
			/*
			 * CIRQ can monitor edge- or level-triggered
			 * interrupts (cirq register: 0 = edge, 1 = level)
			 */
			if (mt_irq_get_sens(cirq_all_events.wakeup_events[i])
					== SENS_EDGE) {
				cirq_all_events.table[cirq_reg].sen |= mask;
			}

			cirq_all_events.table[cirq_reg].used = 1U;
			cirq_all_events.table[cirq_reg].reg_num = cirq_reg;
		}
	}
}

/*
 * mt_cirq_set_pol: Set the polarity for the specified SYS_CIRQ number.
 * @cirq_num: the SYS_CIRQ number to set
 * @pol: polarity to set
 * @return:
 *    0: set pol success
 *   -1: cirq num is out of range
 */
#ifdef CIRQ_WITH_POLARITY
static int mt_cirq_set_pol(uint32_t cirq_num, uint32_t pol)
{
	uint32_t base;
	uint32_t bit = 1U << (cirq_num % 32U);

	if (cirq_num >= CIRQ_IRQ_NUM) {
		return -1;
	}

	if (pol == MT_CIRQ_POL_NEG) {
		base = (cirq_num / 32U) * 4U + CIRQ_POL_CLR_BASE;
	} else if (pol == MT_CIRQ_POL_POS) {
		base = (cirq_num / 32U) * 4U + CIRQ_POL_SET_BASE;
	} else {
		return -1;
	}

	mmio_write_32(base, bit);
	return 0;
}
#endif

/*
 * mt_cirq_mask: Mask the specified SYS_CIRQ.
 * @cirq_num: the SYS_CIRQ number to mask
 * @return:
 *    0: mask success
 *   -1: cirq num is out of range
 */
static int mt_cirq_mask(uint32_t cirq_num)
{
	uint32_t bit = 1U << (cirq_num % 32U);

	if (cirq_num >= CIRQ_IRQ_NUM) {
		return -1;
	}

	mmio_write_32((cirq_num / 32U) * 4U + CIRQ_MASK_SET_BASE, bit);

	return 0;
}

/*
 * mt_cirq_unmask: Unmask the specified SYS_CIRQ.
 * @cirq_num: the SYS_CIRQ number to unmask
 * @return:
 *    0: unmask success
 *   -1: cirq num is out of range
 */
static int mt_cirq_unmask(uint32_t cirq_num)
{
	uint32_t bit = 1U << (cirq_num % 32U);

	if (cirq_num >= CIRQ_IRQ_NUM) {
		return -1;
	}

	mmio_write_32((cirq_num / 32U) * 4U + CIRQ_MASK_CLR_BASE, bit);

	return 0;
}

uint32_t mt_irq_get_en(uint32_t irq)
{
	uint32_t addr, st, val;

	addr = BASE_GICD_BASE + GICD_ISENABLER + (irq / 32U) * 4U;
	st = mmio_read_32(addr);

	val = (st >> (irq % 32U)) & 1U;

	return val;
}

static void __cirq_fast_clone(void)
{
	struct cirq_reg *reg;
	unsigned int i;

	for (i = 0U; i < CIRQ_REG_NUM; ++i) {
		uint32_t cirq_bit;

		reg = &cirq_all_events.table[i];

		if (reg->used == 0U) {
			continue;
		}

		mmio_write_32(CIRQ_SENS_CLR_BASE + (reg->reg_num * 4U),
			      reg->sen);

		for (cirq_bit = 0U; cirq_bit < 32U; ++cirq_bit) {
			uint32_t val, cirq_id;
			uint32_t gic_id;
#ifdef CIRQ_WITH_POLARITY
			uint32_t gic_bit, pol;
#endif
			uint32_t en;

			val = ((1U << cirq_bit) & reg->mask);

			if (val == 0U) {
				continue;
			}

			cirq_id = (reg->reg_num << 5U) + cirq_bit;
			gic_id = CIRQ_TO_IRQ_NUM(cirq_id);
#ifdef CIRQ_WITH_POLARITY
			gic_bit = (0x1U << ((gic_id - 32U) % 32U));
			pol = mt_irq_get_pol(gic_id) & gic_bit;
			if (pol != 0U) {
				mt_cirq_set_pol(cirq_id, MT_CIRQ_POL_NEG);
			} else {
				mt_cirq_set_pol(cirq_id, MT_CIRQ_POL_POS);
			}
#endif
			en = mt_irq_get_en(gic_id);
			if (en == 1U) {
				mt_cirq_unmask(cirq_id);
			} else {
				mt_cirq_mask(cirq_id);
			}
		}
	}
}

static void cirq_fast_clone(void)
{
	if (already_cloned == 0U) {
		collect_all_wakeup_events();
		already_cloned = 1U;
	}
	__cirq_fast_clone();
}

void set_wakeup_sources(uint32_t *list, uint32_t num_of_events)
{
	cirq_all_events.num_of_events = num_of_events;
	cirq_all_events.wakeup_events = list;
}

/*
 * mt_cirq_clone_gic: Copy the setting from GIC to SYS_CIRQ
 */
void mt_cirq_clone_gic(void)
{
	cirq_fast_clone();
}

uint32_t mt_irq_get_pending_vec(uint32_t start_irq)
{
	uint32_t base = 0U;
	uint32_t pending_vec = 0U;
	uint32_t reg = start_irq / 32U;
	uint32_t LSB_num, MSB_num;
	uint32_t LSB_vec, MSB_vec;

	base = BASE_GICD_BASE;

	/* if start_irq is not 32-aligned, assemble the vector from two registers */
	MSB_num = start_irq % 32U;
	if (MSB_num != 0U) {
		LSB_num = 32U - MSB_num;
		LSB_vec = mmio_read_32(base + GICD_ISPENDR +
			reg * 4U) >> MSB_num;
		MSB_vec = mmio_read_32(base + GICD_ISPENDR +
			(reg + 1U) * 4U) << LSB_num;
		pending_vec = MSB_vec | LSB_vec;
	} else {
		pending_vec = mmio_read_32(base + GICD_ISPENDR + reg * 4);
	}

	return pending_vec;
}

static int mt_cirq_get_mask_vec(unsigned int i)
{
	return mmio_read_32((i * 4U) + CIRQ_MASK_BASE);
}

/*
 * mt_cirq_ack_all: Ack all the interrupts on SYS_CIRQ
 */
void mt_cirq_ack_all(void)
{
	uint32_t ack_vec, pend_vec, mask_vec;
	unsigned int i;

	for (i = 0; i < CIRQ_CTRL_REG_NUM; i++) {
		/*
		 * If an irq is pending and not masked, don't ack it.
		 * Since the cirq start irq might not be 32-aligned with
		 * the gic, use a dedicated helper to get the proper
		 * vector of pending irqs.
		 */
		pend_vec = mt_irq_get_pending_vec(CIRQ_SPI_START
			+ (i + 1U) * 32U);
		mask_vec = mt_cirq_get_mask_vec(i);
		/* ack everything that is not "pending and not masked" */
		ack_vec = (~pend_vec) | mask_vec;
		mmio_write_32(CIRQ_ACK_BASE + (i * 4U), ack_vec);
	}

	/*
	 * make sure all cirq settings take effect
	 * before doing other things
	 */
	dsb();
}

/*
 * mt_cirq_enable: Enable SYS_CIRQ
 */
void mt_cirq_enable(void)
{
	uint32_t st;

	/* level only */
	mt_cirq_ack_all();

	st = mmio_read_32(CIRQ_CON);
	/*
	 * CIRQ could monitor edge/level trigger
	 */
	st |= (CIRQ_CON_EN << CIRQ_CON_EN_BITS);

	mmio_write_32(CIRQ_CON, (st & CIRQ_CON_BITS_MASK));
}

/*
 * mt_cirq_disable: Disable SYS_CIRQ
 */
void mt_cirq_disable(void)
{
	uint32_t st;

	st = mmio_read_32(CIRQ_CON);
	st &= ~(CIRQ_CON_EN << CIRQ_CON_EN_BITS);
	mmio_write_32(CIRQ_CON, (st & CIRQ_CON_BITS_MASK));
}

void mt_irq_unmask_for_sleep_ex(uint32_t irq)
{
	uint32_t mask;

	mask = 1U << (irq % 32U);

	mmio_write_32(BASE_GICD_BASE + GICD_ISENABLER +
		((irq / 32U) * 4U), mask);
}

void mt_cirq_mask_all(void)
{
	unsigned int i;

	for (i = 0U; i < CIRQ_CTRL_REG_NUM; i++) {
		mmio_write_32(CIRQ_MASK_SET_BASE + (i * 4U), 0xFFFFFFFF);
	}
	dsb();
}

static void cirq_fast_sw_flush(void)
{
	struct cirq_reg *reg;
	unsigned int i;

	for (i = 0U; i < CIRQ_REG_NUM; ++i) {
		uint32_t cirq_bit;

		reg = &cirq_all_events.table[i];

		if (reg->used == 0U) {
			continue;
		}

		reg->pending = mmio_read_32(CIRQ_STA_BASE +
			(reg->reg_num << 2U));
		reg->pending &= reg->mask;

		for (cirq_bit = 0U; cirq_bit < 32U; ++cirq_bit) {
			uint32_t val, cirq_id;

			val = (1U << cirq_bit) & reg->pending;
			if (val == 0U) {
				continue;
			}

			cirq_id = (reg->reg_num << 5U) + cirq_bit;
			mt_irq_set_pending(CIRQ_TO_IRQ_NUM(cirq_id));
			if (CIRQ_TO_IRQ_NUM(cirq_id) == MD_WDT_IRQ_BIT_ID) {
				INFO("Set MD_WDT_IRQ pending in %s\n",
				     __func__);
			}
		}
	}
}

/*
 * mt_cirq_flush: Flush interrupts from SYS_CIRQ to GIC
 */
void mt_cirq_flush(void)
{
	cirq_fast_sw_flush();
	mt_cirq_mask_all();
	mt_cirq_ack_all();
}
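
/*
 * Illustrative suspend/resume ordering (an assumption about how these
 * helpers fit together, not taken from this file; the real call sites
 * are platform-specific):
 *
 *	mt_cirq_clone_gic();	// copy GIC enable/sense(/polarity) into SYS_CIRQ
 *	mt_cirq_enable();	// SYS_CIRQ starts latching wake-up events
 *	// ... enter the low-power state ...
 *	mt_cirq_flush();	// replay latched events as GIC pending bits
 *	mt_cirq_disable();
 */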

void mt_cirq_sw_reset(void)
{
	uint32_t st;

	st = mmio_read_32(CIRQ_CON);
	st |= (CIRQ_SW_RESET << CIRQ_CON_SW_RST_BITS);
	mmio_write_32(CIRQ_CON, st);
developer28d70382019-12-19 15:58:20 +0800549}